From 2c4de0d9c44f9ec2a2b24c9c16a1566a2eebb2ac Mon Sep 17 00:00:00 2001 From: John Fulton Date: Tue, 15 Oct 2024 11:21:29 -0400 Subject: [PATCH] Introduce ci_dcn_site role The ci_dcn_site role may be used to deploy DCN sites for testing. Each DCN site is a new EDPM nodeset with a collocated Ceph cluster. Co-authored-by: Sergey Bekkerman --- docs/dictionary/en-custom.txt | 4 + playbooks/dcn.yml | 66 ++++++ roles/ci_dcn_site/README.md | 91 ++++++++ roles/ci_dcn_site/defaults/main.yml | 27 +++ roles/ci_dcn_site/meta/main.yml | 30 +++ roles/ci_dcn_site/tasks/az.yml | 55 +++++ roles/ci_dcn_site/tasks/ceph.yml | 92 ++++++++ roles/ci_dcn_site/tasks/main.yml | 37 +++ roles/ci_dcn_site/tasks/post-ceph.yml | 107 +++++++++ roles/ci_dcn_site/tasks/pre-ceph.yml | 90 +++++++ roles/ci_dcn_site/tasks/set_network_facts.yml | 43 ++++ .../templates/deployment/values.yaml.j2 | 20 ++ .../edpm-pre-ceph/deployment/values.yaml.j2 | 22 ++ .../edpm-pre-ceph/nodeset/values.yaml.j2 | 164 +++++++++++++ .../templates/network-values/values.yaml.j2 | 219 ++++++++++++++++++ .../templates/service-values.yaml.j2 | 160 +++++++++++++ roles/ci_dcn_site/templates/values.yaml.j2 | 63 +++++ .../reproducers/{va-dcn.yml => dt-dcn.yml} | 137 ++++++++++- 18 files changed, 1426 insertions(+), 1 deletion(-) create mode 100644 playbooks/dcn.yml create mode 100644 roles/ci_dcn_site/README.md create mode 100644 roles/ci_dcn_site/defaults/main.yml create mode 100644 roles/ci_dcn_site/meta/main.yml create mode 100644 roles/ci_dcn_site/tasks/az.yml create mode 100644 roles/ci_dcn_site/tasks/ceph.yml create mode 100644 roles/ci_dcn_site/tasks/main.yml create mode 100644 roles/ci_dcn_site/tasks/post-ceph.yml create mode 100644 roles/ci_dcn_site/tasks/pre-ceph.yml create mode 100644 roles/ci_dcn_site/tasks/set_network_facts.yml create mode 100644 roles/ci_dcn_site/templates/deployment/values.yaml.j2 create mode 100644 roles/ci_dcn_site/templates/edpm-pre-ceph/deployment/values.yaml.j2 create mode 100644 
roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 create mode 100644 roles/ci_dcn_site/templates/network-values/values.yaml.j2 create mode 100644 roles/ci_dcn_site/templates/service-values.yaml.j2 create mode 100644 roles/ci_dcn_site/templates/values.yaml.j2 rename scenarios/reproducers/{va-dcn.yml => dt-dcn.yml} (69%) diff --git a/docs/dictionary/en-custom.txt b/docs/dictionary/en-custom.txt index 79315d66c4..783c236922 100644 --- a/docs/dictionary/en-custom.txt +++ b/docs/dictionary/en-custom.txt @@ -21,6 +21,8 @@ authfile autoscale autostart awk +az +azs backend backends baremetal @@ -139,6 +141,7 @@ dnsdata dnsmasq dockerfile dryrun +dt dts ecdsa edecb @@ -333,6 +336,7 @@ nodeexporter nodenetworkconfigurationpolicy nodeps nodeset +nodesets nodetemplate noop nopasswd diff --git a/playbooks/dcn.yml b/playbooks/dcn.yml new file mode 100644 index 0000000000..043de22c57 --- /dev/null +++ b/playbooks/dcn.yml @@ -0,0 +1,66 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Deploy DCN environment + hosts: localhost + tasks: + - name: Load reproducer-variables + ansible.builtin.include_vars: + file: "~/reproducer-variables.yml" + + - name: Load networking-environment-definition + ansible.builtin.include_vars: + file: "/etc/ci/env/networking-environment-definition.yml" + name: cifmw_networking_env_definition + + - name: Create a network subnet list + ansible.builtin.set_fact: + _network_ranges: >- + {{ + cifmw_networking_env_definition.networks + | dict2items + | selectattr('key', 'search', '^ctlplane') + | map(attribute='value.network_v4') + | list + }} + + - name: Get OpenShift access token + register: _auth_results + community.okd.openshift_auth: + host: "{{ cifmw_openshift_api }}" + username: "{{ cifmw_openshift_user }}" + password: "{{ cifmw_openshift_password }}" + validate_certs: false + + - name: Deploy EDPM + loop: "{{ groups | dict2items | selectattr('key', 'search', 'compute') | list }}" + loop_control: + index_var: idx + loop_var: itm + vars: + _az: "az{{ idx }}" + _subnet: "subnet{{ idx + 1 }}" + _subnet_network_range: "{{ _network_ranges[idx] }}" + _group_name: "{{ itm.key }}" + _group_hosts: "{{ groups[itm.key] }}" + _edpm_hosts: "{{ cifmw_baremetal_hosts | dict2items | selectattr('key', 'in', groups[itm.key]) | items2dict }}" + _ceph_bootstrap_node: "{{ (_edpm_hosts | dict2items | first).key if _edpm_hosts | length > 0 else '' }}" + _ceph_vars_list: [] + when: + - _subnet_network_range != '' + - _ceph_bootstrap_node != '' + ansible.builtin.include_role: + name: ci_dcn_site diff --git a/roles/ci_dcn_site/README.md b/roles/ci_dcn_site/README.md new file mode 100644 index 0000000000..562474432c --- /dev/null +++ b/roles/ci_dcn_site/README.md @@ -0,0 +1,91 @@ +# ci_dcn_site + +Deploys DCN sites for testing. Each DCN site is a new EDPM nodeset +with a collocated Ceph cluster. 
+ +## Privilege escalation + +- Applies CRDs in openstack namespace +- Runs openstack client commands to create aggregates and discover new + compute hosts + +## Parameters + +* `_az`: The name of the availability zone for the AZ, e.g. `az1` +* `_group_name`: The name of the group of nodes to be deployed, e.g. `dcn1-computes` +* `_subnet`: The name of the subnet the DCN site will use, e.g. `subnet2` +* `_subnet_network_range`: The range of the subnet the DCN site will use, e.g. `192.168.133.0/24` + +## Examples + +To deploy two nodesets named dcn1-computes and dcn2-computes, +the role may be called like this. +```yaml +- name: Deploy + include_role: ci_dcn_site + with_items: "{{ groups | dict2items | selectattr('key', 'search', 'compute') | list }}" + loop_control: + index_var: idx + loop_var: item + vars: + _subnet: "subnet{{ idx + 1 }}" + _group_name: "{{ item.key }}" + _az: "az{{ idx }}" + _subnet_network_range: "{{ _network_ranges[idx] }}" +``` +The above assumes the following values for each iteration: +``` +_subnet: subnet2 | _group_name: dcn1-computes | _az: az1 | _subnet_network_range: 192.168.133.0/24 +_subnet: subnet3 | _group_name: dcn2-computes | _az: az2 | _subnet_network_range: 192.168.144.0/24 +``` +It relies on the `ci-framework-data/artifacts/zuul_inventory.yml` which the +ci-framework will populate correctly when the `dt-dcn.yml` scenario is used. +The variables above can then be built with the following tasks before +the above is run. 
+```yaml + - name: Load reproducer-variables + ansible.builtin.include_vars: + file: "~/reproducer-variables.yml" + + - name: Load networking-environment-definition + ansible.builtin.include_vars: + file: "/etc/ci/env/networking-environment-definition.yml" + name: cifmw_networking_env_definition + + - name: Create a network subnet list + ansible.builtin.set_fact: + _network_ranges: >- + {{ + cifmw_networking_env_definition.networks + | dict2items + | selectattr('key', 'search', '^ctlplane') + | map(attribute='value.network_v4') + | list + }} +``` + +## Integration with Architecture Repository + +The directions in the +[DCN DT](https://github.com/openstack-k8s-operators/architecture/tree/main/examples/dt/dcn) +end with deploying the first Availability Zone (AZ) called `az0`. +Additional AZs may be deployed for testing by calling this role. + +The DCN DT contains values yaml files which may be passed to +kustomize. This role generates additional instances of the same +type of values files from jinja templates. The templates are populated +with the values in the environment which are set when the `dt-dcn.yml` +scenario is used. The role then calls kustomize to apply the CRDs. + +The role is executed by the dcn.yml playbook found in the playbooks +directory. This same playbook is called by the automation structure +in the DCN DT (`automation/vars/dcn.yaml`) by using a +`post_stage_run`. + +## Maintainers + +This role is maintained by the following + +- https://github.com/sbekkerm +- https://github.com/krcmarik +- https://github.com/fultonj diff --git a/roles/ci_dcn_site/defaults/main.yml b/roles/ci_dcn_site/defaults/main.yml new file mode 100644 index 0000000000..b0bcb71b16 --- /dev/null +++ b/roles/ci_dcn_site/defaults/main.yml @@ -0,0 +1,27 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +ci_dcn_site_arch_repo_path: /home/zuul/src/github.com/openstack-k8s-operators/architecture +ci_dcn_site_arch_path: "{{ ci_dcn_site_arch_repo_path }}/examples/dt/dcn" +ci_dcn_site_cifmw_repo_path: /home/zuul/src/github.com/openstack-k8s-operators/ci-framework +ci_dcn_site_search_storage_network_names: + - "storage" + - "storagedcn1" + - "storagedcn2" +ci_dcn_site_search_storagemgmt_network_names: + - "storagemgmt" + - "storagemgmtdcn1" + - "storagemgmtdcn2" diff --git a/roles/ci_dcn_site/meta/main.yml b/roles/ci_dcn_site/meta/main.yml new file mode 100644 index 0000000000..05ebd500cd --- /dev/null +++ b/roles/ci_dcn_site/meta/main.yml @@ -0,0 +1,30 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +galaxy_info: + author: CI Framework + description: CI Framework Role -- ci_dcn_site + company: Red Hat + license: Apache-2.0 + min_ansible_version: "2.14" + namespace: cifmw + galaxy_tags: + - cifmw + +# List your role dependencies here, one per line. 
Be sure to remove the '[]' above, +# if you add dependencies to this list. +dependencies: [] diff --git a/roles/ci_dcn_site/tasks/az.yml b/roles/ci_dcn_site/tasks/az.yml new file mode 100644 index 0000000000..42e25ccaa7 --- /dev/null +++ b/roles/ci_dcn_site/tasks/az.yml @@ -0,0 +1,55 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Check if AZ exists has hosts + register: az_hosts + ignore_errors: true + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + openstack aggregate show {{ _az }} -c hosts -f value + +- name: Convert az_hosts string to list and remove extra text + ansible.builtin.set_fact: + az_hosts_list: > + {{ az_hosts.stdout + | default([]) + | from_yaml + | map('regex_replace', 'edpm-compute-(.*?)\\..*', 'compute-\\1') + | list }} + +- name: Create AZ if it does not exist + when: + - az_hosts.rc == 1 + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + openstack aggregate create {{ _az }} --zone {{ _az }} + +- name: Add only the missing edpm hosts to AZ + loop: "{{ _edpm_hosts | dict2items }}" + when: + - item.key not in az_hosts_list + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: openstackclient + command: >- + openstack aggregate add host 
{{ _az }} edpm-{{ item.key }}.ctlplane.example.com diff --git a/roles/ci_dcn_site/tasks/ceph.yml b/roles/ci_dcn_site/tasks/ceph.yml new file mode 100644 index 0000000000..d13aab4b08 --- /dev/null +++ b/roles/ci_dcn_site/tasks/ceph.yml @@ -0,0 +1,92 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Fetch network facts of ceph bootstrap node + delegate_to: "{{ _ceph_bootstrap_node }}" + run_once: true + ansible.builtin.setup: + gather_subset: + - "!all" + - "!min" + - network + +- name: Update the hosts file on the Ceph bootstrap host + become: true + vars: + ceph_boot_ssh_ip: "{{ ansible_all_ipv4_addresses | ansible.utils.ipaddr(_subnet_network_range) | first }}" + delegate_to: "{{ _ceph_bootstrap_node }}" + run_once: true + ansible.builtin.lineinfile: + path: /etc/hosts + line: "{{ ceph_boot_ssh_ip }} {{ _ceph_bootstrap_node }}" + state: present + create: true + backup: true + insertbefore: EOF + +- name: Ensure Ceph bootstrap host can ping itself + register: _cmd_result + retries: 5 + delay: 60 + until: _cmd_result.rc == 0 + delegate_to: "{{ _ceph_bootstrap_node }}" + ansible.builtin.command: + cmd: >- + ping -c1 "{{ _ceph_bootstrap_node }}" + +- name: Create Ceph playbook variables file + ansible.builtin.copy: + dest: "~/ci-framework-data/parameters/ceph-{{ _az }}.yml" + mode: "0644" + content: | + --- + cifmw_cephadm_cluster: {{ _az }} + ssh_network_range: {{ _subnet_network_range }} + 
cifmw_ceph_target: {{ _group_name }} + storage_network_range: {{ _storage_network_range | ansible.utils.ipaddr('network/prefix') }} + storage_mgmt_network_range: {{ _storage_mgmt_network_range | ansible.utils.ipaddr('network/prefix') }} + cifmw_ceph_client_service_values_post_ceph_path_dst: /tmp/edpm_service_values_post_ceph_{{ _az }}.yaml + cifmw_ceph_client_values_post_ceph_path_dst: "{{ ci_dcn_site_arch_repo_path }}/values.yaml" + cifmw_ceph_spec_data_devices: >- + data_devices: + all: true + cifmw_ceph_client_vars: /tmp/ceph_client_{{_az}}.yml + +- name: Deploy Ceph + cifmw.general.ci_script: + output_dir: "/home/zuul/ci-framework-data/artifacts" + chdir: "{{ ci_dcn_site_cifmw_repo_path }}" + script: >- + ansible-playbook + -i ~/ci-framework-data/artifacts/zuul_inventory.yml + -e @~/ci-framework-data/parameters/reproducer-variables.yml + -e @~/ci-framework-data/parameters/ceph-{{ _az }}.yml + playbooks/ceph.yml + +- name: Load the Ceph cluster variables + ansible.builtin.include_vars: + file: "/tmp/ceph_client_{{_az}}.yml" + +- name: Find all ceph .conf and .keyring files + register: _ceph_conf_files + ansible.builtin.find: + paths: "/tmp" + patterns: "ceph*.conf,ceph*.keyring,az*.conf,az*.keyring" + recurse: false + +- name: Load ceph configuration files + ansible.builtin.set_fact: + _ceph_files: "{{ _ceph_conf_files.files | map(attribute='path') | list }}" diff --git a/roles/ci_dcn_site/tasks/main.yml b/roles/ci_dcn_site/tasks/main.yml new file mode 100644 index 0000000000..6aa127ba36 --- /dev/null +++ b/roles/ci_dcn_site/tasks/main.yml @@ -0,0 +1,37 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +- name: Set Network related facts + ansible.builtin.include_tasks: set_network_facts.yml + +- name: Render and apply pre-ceph CRs in DCN context + ansible.builtin.include_tasks: pre-ceph.yml + +- name: Deploy Ceph in DCN context + ansible.builtin.include_tasks: ceph.yml + +- name: Render and apply post-ceph CRs in DCN context + ansible.builtin.include_tasks: post-ceph.yml + +- name: Run Nova cell discovery for new DCN hosts + kubernetes.core.k8s_exec: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + namespace: openstack + pod: nova-cell0-conductor-0 + command: nova-manage cell_v2 discover_hosts --verbose + +- name: Create new AZ and add new hosts to it + ansible.builtin.include_tasks: az.yml diff --git a/roles/ci_dcn_site/tasks/post-ceph.yml b/roles/ci_dcn_site/tasks/post-ceph.yml new file mode 100644 index 0000000000..b251d12703 --- /dev/null +++ b/roles/ci_dcn_site/tasks/post-ceph.yml @@ -0,0 +1,107 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Render the post-ceph values.yaml + ansible.builtin.template: + mode: "0644" + backup: true + src: "templates/values.yaml.j2" + dest: "{{ ci_dcn_site_arch_path }}/values.yaml" + +- name: Find all ceph variable files + register: _ceph_vars_files + ansible.builtin.find: + paths: "/tmp" + patterns: "ceph_client_az*.yml" + recurse: false + +- name: Load all ceph vars from files + loop: "{{ _ceph_vars_files.files | map(attribute='path') | list }}" + register: _ceph_vars + ansible.builtin.include_vars: + file: "{{ item }}" + +- name: Combine ceph variables into a list of dictionaries + loop: "{{ _ceph_vars.results }}" + ansible.builtin.set_fact: + _ceph_vars_list: "{{ _ceph_vars_list | union([item.ansible_facts]) }}" + +- name: Render the post-ceph service-values.yaml + ansible.builtin.template: + mode: "0644" + backup: true + src: "templates/service-values.yaml.j2" + dest: "{{ ci_dcn_site_arch_path }}/service-values.yaml" + +- name: Kustomize post-ceph NodeSet + ansible.builtin.set_fact: + post_ceph_nodeset_cr: >- + {{ lookup('kubernetes.core.kustomize', + dir=ci_dcn_site_arch_path) }} + +- name: Save the post-ceph NodeSet CR + ansible.builtin.copy: + mode: "0644" + dest: "{{ ci_dcn_site_arch_path }}/dataplane-nodeset-post-ceph_{{ _az }}.yaml" + content: "{{ post_ceph_nodeset_cr }}" + backup: true + +- name: Render the post-ceph DataPlaneDeployment values.yaml + ansible.builtin.template: + mode: "0644" + backup: true + src: "templates/deployment/values.yaml.j2" + dest: "{{ ci_dcn_site_arch_path }}/deployment/values.yaml" + +- name: Kustomize post-ceph DataPlaneDeployment + ansible.builtin.set_fact: + post_ceph_deployment_cr: >- + {{ lookup('kubernetes.core.kustomize', + dir=ci_dcn_site_arch_path + '/deployment') }} + +- name: Save the post-ceph DataPlaneDeployment CR + ansible.builtin.copy: + mode: "0644" + dest: "{{ ci_dcn_site_arch_path }}/dataplane-deployment-post-ceph_{{ _az }}.yaml" + content: "{{ post_ceph_deployment_cr }}" + backup: true + +- name: 
Apply post-ceph NodeSet CR + register: result + retries: 5 + delay: 10 + until: result is not failed + kubernetes.core.k8s: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + state: present + apply: true + src: "{{ ci_dcn_site_arch_path }}/dataplane-nodeset-post-ceph_{{ _az }}.yaml" + +- name: Apply post-ceph DataPlaneDeployment CR + register: result + retries: 5 + delay: 10 + until: result is not failed + kubernetes.core.k8s: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + state: present + apply: true + src: "{{ ci_dcn_site_arch_path }}/dataplane-deployment-post-ceph_{{ _az }}.yaml" + wait: true + wait_condition: + type: Ready + status: "True" + wait_timeout: 3200 diff --git a/roles/ci_dcn_site/tasks/pre-ceph.yml b/roles/ci_dcn_site/tasks/pre-ceph.yml new file mode 100644 index 0000000000..7894b94e13 --- /dev/null +++ b/roles/ci_dcn_site/tasks/pre-ceph.yml @@ -0,0 +1,90 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Render the pre-ceph NodeSet values.yaml + vars: + _edpm_instance_dict: "{{ cifmw_networking_env_definition.instances }}" + _edpm_network_dict: "{{ cifmw_networking_env_definition.networks }}" + _ssh_authorizedkeys: "{{ lookup('file', '~/.ssh/id_cifw.pub', rstrip=False) }}" + _ssh_private_key: "{{ lookup('file', '~/.ssh/id_cifw', rstrip=False) }}" + _ssh_public_key: "{{ lookup('file', '~/.ssh/id_cifw.pub', rstrip=False) }}" + _migration_priv_key: "{{ lookup('file', '~/ci-framework-data/artifacts/nova_migration_key', rstrip=False) }}" + _migration_pub_key: "{{ lookup('file', '~/ci-framework-data/artifacts/nova_migration_key.pub', rstrip=False) }}" + ansible.builtin.template: + backup: true + src: "templates/edpm-pre-ceph/nodeset/values.yaml.j2" + dest: "{{ ci_dcn_site_arch_path }}/edpm-pre-ceph/nodeset/values.yaml" + mode: "0644" + +- name: Render the pre-ceph DataPlaneDeployment values.yaml + ansible.builtin.template: + mode: "0644" + backup: true + src: "templates/edpm-pre-ceph/deployment/values.yaml.j2" + dest: "{{ ci_dcn_site_arch_path }}/edpm-pre-ceph/deployment/values.yaml" + +- name: Kustomize pre-ceph NodeSet + ansible.builtin.set_fact: + nodeset_cr: >- + {{ lookup('kubernetes.core.kustomize', + dir=ci_dcn_site_arch_path + '/edpm-pre-ceph/nodeset') }} + +- name: Save the pre-ceph NodeSet CR + ansible.builtin.copy: + mode: "0644" + dest: "{{ ci_dcn_site_arch_path }}/dataplane-nodeset-pre-ceph_{{ _az }}.yaml" + content: "{{ nodeset_cr }}" + backup: true + +- name: Kustomize pre-ceph DataPlaneDeployment + ansible.builtin.set_fact: + deployment_cr: >- + {{ lookup('kubernetes.core.kustomize', + dir=ci_dcn_site_arch_path + '/edpm-pre-ceph/deployment') }} + +- name: Save the pre-ceph DataPlaneDeployment CR + ansible.builtin.copy: + mode: "0644" + dest: "{{ ci_dcn_site_arch_path }}/dataplane-deployment-pre-ceph_{{ _az }}.yaml" + content: "{{ deployment_cr }}" + backup: true + +- name: Apply pre-ceph NodeSet CR + register: result + retries: 5 + delay: 10 + 
until: result is not failed + kubernetes.core.k8s: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + state: present + apply: true + src: "{{ ci_dcn_site_arch_path }}/dataplane-nodeset-pre-ceph_{{ _az }}.yaml" + +- name: Apply pre-ceph DataPlaneDeployment + register: result + retries: 10 + delay: 10 + until: result is not failed + kubernetes.core.k8s: + api_key: "{{ _auth_results.openshift_auth.api_key }}" + state: present + apply: true + src: "{{ ci_dcn_site_arch_path }}/dataplane-deployment-pre-ceph_{{ _az }}.yaml" + wait: true + wait_condition: + type: Ready + status: "True" + wait_timeout: 2400 diff --git a/roles/ci_dcn_site/tasks/set_network_facts.yml b/roles/ci_dcn_site/tasks/set_network_facts.yml new file mode 100644 index 0000000000..753f42ffda --- /dev/null +++ b/roles/ci_dcn_site/tasks/set_network_facts.yml @@ -0,0 +1,43 @@ +--- +# Copyright Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +- name: Find storage network parameters + ansible.builtin.set_fact: + selected_storage_network: >- + {{ cifmw_networking_env_definition.instances[_group_hosts | first]['networks'] + | dict2items + | selectattr('key', 'in', ci_dcn_site_search_storage_network_names) + | map(attribute='value') + | first }} + +- name: Find storagemgmt network parameters + ansible.builtin.set_fact: + selected_storagemgmt_network: >- + {{ cifmw_networking_env_definition.instances[_group_hosts | first]['networks'] + | dict2items + | selectattr('key', 'in', ci_dcn_site_search_storagemgmt_network_names) + | map(attribute='value') + | first }} + +- name: Calculate storage network CIDR + ansible.builtin.set_fact: + _storage_network_range: >- + {{ selected_storage_network.ip_v4 }}/{{ selected_storage_network.prefix_length_v4 }} + +- name: Calculate storagemgmt network CIDR + ansible.builtin.set_fact: + _storage_mgmt_network_range: >- + {{ selected_storagemgmt_network.ip_v4 }}/{{ selected_storagemgmt_network.prefix_length_v4 }} diff --git a/roles/ci_dcn_site/templates/deployment/values.yaml.j2 b/roles/ci_dcn_site/templates/deployment/values.yaml.j2 new file mode 100644 index 0000000000..303765c697 --- /dev/null +++ b/roles/ci_dcn_site/templates/deployment/values.yaml.j2 @@ -0,0 +1,20 @@ +# local-config: referenced, but not emitted by kustomize +--- +# source: dcn/deployment/values.yaml.j2 +apiVersion: v1 +kind: ConfigMap +metadata: + name: edpm-deployment-values-post-ceph + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset_name: {{ _group_name }}-edpm + deployment: + name: post-ceph-{{ _group_name }} + servicesOverride: + - install-certs + - ceph-client + - ovn + - neutron-metadata + - libvirt + - nova-custom-ceph-{{ _az }} diff --git a/roles/ci_dcn_site/templates/edpm-pre-ceph/deployment/values.yaml.j2 b/roles/ci_dcn_site/templates/edpm-pre-ceph/deployment/values.yaml.j2 new file mode 100644 index 0000000000..9c842ecf93 --- /dev/null +++ 
b/roles/ci_dcn_site/templates/edpm-pre-ceph/deployment/values.yaml.j2 @@ -0,0 +1,22 @@ +--- +# source: dcn/edpm-pre-ceph/deployment/values.yaml.j2 +apiVersion: v1 +data: + nodeset_name: {{ _group_name }}-edpm + deployment: + name: pre-ceph-{{ _group_name }} + servicesOverride: + - bootstrap + - configure-network + - validate-network + - install-os + - ceph-hci-pre + - configure-os + - ssh-known-hosts + - run-os + - reboot-os +kind: ConfigMap +metadata: + annotations: + config.kubernetes.io/local-config: 'true' + name: edpm-deployment-values diff --git a/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 b/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 new file mode 100644 index 0000000000..d3cbb30f26 --- /dev/null +++ b/roles/ci_dcn_site/templates/edpm-pre-ceph/nodeset/values.yaml.j2 @@ -0,0 +1,164 @@ +--- +# source: dcn/edpm-pre-ceph/nodeset/values.yaml.j2 +apiVersion: v1 +kind: ConfigMap +metadata: + name: edpm-nodeset-values + annotations: + config.kubernetes.io/local-config: "true" +data: + nodeset_name: {{ _group_name }}-edpm + ssh_keys: + # Authorized keys that will have access to the dataplane computes via SSH + authorized: {{ _ssh_authorizedkeys | b64encode }} + # The private key that will have access to the dataplane computes via SSH + private: {{ _ssh_private_key | b64encode }} + # The public key that will have access to the dataplane computes via SSH + public: {{ _ssh_public_key | b64encode }} + nodeset: + ansible: + ansibleUser: "zuul" + ansiblePort: 22 + ansibleVars: + timesync_ntp_servers: + - hostname: clock.redhat.com + # CHANGEME -- see https://access.redhat.com/solutions/253273 + # edpm_bootstrap_command: | + # subscription-manager register --username \ + # --password + # podman login -u -p registry.redhat.io + edpm_network_config_hide_sensitive_logs: false + edpm_network_config_os_net_config_mappings: +{% for _host_name in _edpm_hosts.keys() %} + edpm-{{ _host_name }}: +{% for nic in _edpm_hosts[_host_name].nics %} 
+ nic{{ loop.index }}: "{{ nic.mac }}" +{% endfor %} +{% endfor %} +{% raw %} + edpm_network_config_template: | + --- + {% set mtu_list = [ctlplane_mtu] %} + {% for network in nodeset_networks %} + {{ mtu_list.append(lookup('vars', networks_lower[network] ~ '_mtu')) }} + {%- endfor %} + {% set min_viable_mtu = mtu_list | max %} + network_config: + - type: ovs_bridge + name: br-ex + use_dhcp: false + members: + - type: interface + name: nic1 + primary: false + - type: ovs_bridge + name: {{ neutron_physical_bridge_name }} + mtu: {{ min_viable_mtu }} + use_dhcp: false + dns_servers: {{ ctlplane_dns_nameservers }} + domain: {{ dns_search_domains }} + addresses: + - ip_netmask: {{ ctlplane_ip }}/{{ ctlplane_cidr }} + routes: {{ ctlplane_host_routes }} + members: + - type: interface + name: nic2 + mtu: {{ min_viable_mtu }} + # force the MAC address of the bridge to this interface + primary: true + {% for network in nodeset_networks %} + - type: vlan + mtu: {{ lookup('vars', networks_lower[network] ~ '_mtu') }} + vlan_id: {{ lookup('vars', networks_lower[network] ~ '_vlan_id') }} + addresses: + - ip_netmask: + {{ lookup('vars', networks_lower[network] ~ '_ip') }}/{{ lookup('vars', networks_lower[network] ~ '_cidr') }} + routes: {{ lookup('vars', networks_lower[network] ~ '_host_routes') }} + {% endfor %} +{% endraw %} + edpm_bootstrap_release_version_package: [] + edpm_nodes_validation_validate_controllers_icmp: false + edpm_nodes_validation_validate_gateway_icmp: false + edpm_sshd_allowed_ranges: + - 192.168.111.0/24 +{% for network_name, network_data in _edpm_network_dict.items() %} +{% if network_name.startswith('ctlplane') %} + - {{ network_data.network_v4 }} +{% endif %} +{% endfor %} + edpm_sshd_configure_firewall: true + gather_facts: false +{% if 'dcn1' in _group_name %} + edpm_ovn_bridge_mappings: ["leaf1:br-ex"] +{% elif 'dcn2'in _group_name %} + edpm_ovn_bridge_mappings: ["leaf2:br-ex"] +{%endif %} + neutron_physical_bridge_name: br-ctl + 
neutron_public_interface_name: eth0 + edpm_ceph_hci_pre_enabled_services: + - ceph_mon + - ceph_mgr + - ceph_osd + - ceph_rgw + - ceph_nfs + - ceph_rgw_frontend + - ceph_nfs_frontend + storage_mtu: 9000 + storage_mgmt_mtu: 9000 + storage_mgmt_vlan_id: 23 + storage_mgmt_cidr: "24" + storage_mgmt_host_routes: [] + networks: + - defaultRoute: true + name: ctlplane + subnetName: {{ _subnet }} + - name: internalapi + subnetName: {{ _subnet }} + - name: storage + subnetName: {{ _subnet }} + - name: tenant + subnetName: {{ _subnet }} + nodes: +{% for _host_name in _edpm_hosts.keys() %} +{% for network_name, network_data in _edpm_instance_dict[_host_name].networks.items() %} +{% if network_name.startswith('ctlplane') %} + edpm-{{ _host_name }}: + ansible: + ansibleHost: {{ network_data['ip_v4'] }} + hostName: edpm-{{ _host_name }} + networks: + - defaultRoute: true + fixedIP: {{ network_data['ip_v4'] }} + name: ctlplane + subnetName: {{ _subnet }} + - name: internalapi + subnetName: {{ _subnet }} + - name: storage + subnetName: {{ _subnet }} + - name: storagemgmt + subnetName: {{ _subnet }} + - name: tenant + subnetName: {{ _subnet }} +{% endif %} +{% endfor %} +{% endfor %} + services: + - bootstrap + - configure-network + - validate-network + - install-os + - ceph-hci-pre + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ceph-client + - ovn + - neutron-metadata + - libvirt + nova: + migration: + ssh_keys: + private: {{ _migration_priv_key | b64encode }} + public: {{ _migration_pub_key | b64encode }} diff --git a/roles/ci_dcn_site/templates/network-values/values.yaml.j2 b/roles/ci_dcn_site/templates/network-values/values.yaml.j2 new file mode 100644 index 0000000000..2c16ac3ed1 --- /dev/null +++ b/roles/ci_dcn_site/templates/network-values/values.yaml.j2 @@ -0,0 +1,219 @@ +--- +# source: dcn/network-values/values.yaml.j2 +{% set ns = namespace(interfaces={}, + ocp_index=0, + lb_tools={}) %} +data: +{% for host in 
cifmw_networking_env_definition.instances.keys() -%} +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() -%} +{% set ns.interfaces = ns.interfaces | + combine({network.network_name: (network.parent_interface | + default(network.interface_name) + ) + }, + recursive=true) -%} +{% endfor -%} +{% if host is match('^(ocp|crc).*') %} + node_{{ ns.ocp_index }}: +{% set ns.ocp_index = ns.ocp_index+1 %} + name: {{ cifmw_networking_env_definition.instances[host]['hostname'] }} +{% for network in cifmw_networking_env_definition.instances[host]['networks'].values() %} + {{ network.network_name }}_ip: {{ network.ip_v4 }} +{% endfor %} +{% endif %} +{% endfor %} + +{% for network in cifmw_networking_env_definition.networks.values() %} +{% set dcn1_net = cifmw_networking_env_definition.networks[network.network_name + 'dcn1'] %} +{% set dcn2_net = cifmw_networking_env_definition.networks[network.network_name + 'dcn2'] %} +{% set ns.lb_tools = {} %} + {{ network.network_name }}: + dnsDomain: {{ network.search_domain }} +{% if network.tools is defined and network.tools.keys() | length > 0 %} + subnets: +{% for tool in network.tools.keys() %} +{% if tool is match('.*lb$') %} +{% set _ = ns.lb_tools.update({tool: []}) %} +{% endif %} +{% endfor %} +{% if (dcn1_net is defined) and (dcn2_net is defined) %} + - allocationRanges: +{% for range in network.tools.netconfig.ipv4_ranges %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ network.network_v4 }} +{% if network.gw_v4 is defined %} + gateway: {{ network.gw_v4 }} +{% endif %} + name: subnet1 +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% endif %} + routes: + - destination: "{{ dcn1_net.network_v4 }}" + nexthop: "{{network.network_v4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address')}}" + - destination: "{{ dcn2_net.network_v4 }}" + nexthop: "{{network.network_v4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address')}}" + - 
allocationRanges: +{% for range in dcn1_net.tools.netconfig.ipv4_ranges %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ dcn1_net.network_v4 }} +{% if dcn1_net.gw_v4 is defined %} + gateway: {{ dcn1_net.gw_v4 }} +{% endif %} + name: subnet2 +{% if dcn1_net.vlan_id is defined %} + vlan: {{ dcn1_net.vlan_id }} +{% endif %} + routes: + - destination: "{{ network.network_v4 }}" + nexthop: "{{dcn1_net.network_v4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address')}}" + - destination: "{{ dcn2_net.network_v4 }}" + nexthop: "{{dcn1_net.network_v4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address')}}" + - allocationRanges: +{% for range in dcn2_net.tools.netconfig.ipv4_ranges %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ dcn2_net.network_v4 }} +{% if dcn2_net.gw_v4 is defined %} + gateway: {{ dcn2_net.gw_v4 }} +{% endif %} + name: subnet3 +{% if dcn2_net.vlan_id is defined %} + vlan: {{ dcn2_net.vlan_id }} +{% endif %} + routes: + - destination: "{{ network.network_v4 }}" + nexthop: "{{dcn2_net.network_v4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address')}}" + - destination: "{{ dcn1_net.network_v4 }}" + nexthop: "{{dcn2_net.network_v4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address')}}" +{% else %} + - allocationRanges: +{% for range in network.tools.netconfig.ipv4_ranges %} + - end: {{ range.end }} + start: {{ range.start }} +{% endfor %} + cidr: {{ network.network_v4 }} +{% if network.gw_v4 is defined %} + gateway: {{ network.gw_v4 }} +{% endif %} + name: subnet1 +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% endif %} +{% endif %} +{% if ns.lb_tools | length > 0 %} + lb_addresses: +{% for tool in ns.lb_tools.keys() %} +{% for lb_range in network.tools[tool].ipv4_ranges %} + - {{ lb_range.start }}-{{ lb_range.end }} +{% set _ = ns.lb_tools[tool].append(lb_range.start) %} +{% endfor %} + endpoint_annotations: + {{ tool 
}}.universe.tf/address-pool: {{ network.network_name }} + {{ tool }}.universe.tf/allow-shared-ip: {{ network.network_name }} + {{ tool }}.universe.tf/loadBalancerIPs: {{ ','.join(ns.lb_tools[tool]) }} +{% endfor %} +{% endif %} +{% endif %} + prefix-length: {{ network.network_v4 | ansible.utils.ipaddr('prefix') }} + mtu: {{ network.mtu | default(1500) }} +{% if network.vlan_id is defined %} + vlan: {{ network.vlan_id }} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ network.network_name }} + base_iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% else %} +{% if ns.interfaces[network.network_name] is defined %} + iface: {{ ns.interfaces[network.network_name] }} +{% endif %} +{% endif %} +{% if network.tools.multus is defined %} + net-attach-def: | + { + "cniVersion": "0.3.1", + "name": "{{ network.network_name }}", + "type": "macvlan", +{% if network.vlan_id is defined%} + "master": "{{ network.network_name }}", +{% elif network.network_name == "ctlplane" %} + "master": "ospbr", +{% else %} + "master": "{{ ns.interfaces[network.network_name] }}", +{% endif %} + "ipam": { + "type": "whereabouts", + "range": "{{ network.network_v4 }}", + "range_start": "{{ network.tools.multus.ipv4_ranges.0.start }}", +{% if (dcn1_net is defined) and (dcn2_net is defined) %} + "range_end": "{{ network.tools.multus.ipv4_ranges.0.end }}", + "routes": [ + { "dst": "{{ dcn1_net.network_v4 }}", "gw": "{{network.network_v4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address')}}" }, + { "dst": "{{ dcn2_net.network_v4 }}", "gw": "{{ network.network_v4 | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address')}}" } + ] +{% else %} + "range_end": "{{ network.tools.multus.ipv4_ranges.0.end }}" +{% endif %} + } + } +{% endif %} +{% endfor %} + + dns-resolver: + config: + server: + - "{{ cifmw_networking_env_definition.networks.ctlplane.gw_v4 }}" + search: [] + options: + - key: server + values: + - {{ 
cifmw_networking_env_definition.networks.ctlplane.gw_v4 }} +{% for nameserver in cifmw_ci_gen_kustomize_values_nameservers %} + - key: server + values: + - {{ nameserver }} +{% endfor %} + routes: + config: + - destination: 192.168.133.0/24 + next-hop-address: 192.168.122.1 + next-hop-interface: ospbr + - destination: 192.168.144.0/24 + next-hop-address: 192.168.122.1 + next-hop-interface: ospbr + - destination: 172.17.10.0/24 + next-hop-address: 172.17.0.1 + next-hop-interface: internalapi + - destination: 172.18.10.0/24 + next-hop-address: 172.18.0.1 + next-hop-interface: storage + - destination: 172.19.10.0/24 + next-hop-address: 172.19.0.1 + next-hop-interface: tenant + - destination: 172.17.20.0/24 + next-hop-address: 172.17.0.1 + next-hop-interface: internalapi + - destination: 172.18.20.0/24 + next-hop-address: 172.18.0.1 + next-hop-interface: storage + - destination: 172.19.20.0/24 + next-hop-address: 172.19.0.1 + next-hop-interface: tenant + +# Hardcoding the last IP bit since we don't have support for endpoint_annotations in the networking_mapper output + rabbitmq: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'].network_v4 | ansible.utils.ipmath(85) }} + rabbitmq-cell1: + endpoint_annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/loadBalancerIPs: {{ cifmw_networking_env_definition.networks['internalapi'].network_v4 | ansible.utils.ipmath(86) }} + + lbServiceType: LoadBalancer + storageClass: {{ cifmw_ci_gen_kustomize_values_storage_class }} diff --git a/roles/ci_dcn_site/templates/service-values.yaml.j2 b/roles/ci_dcn_site/templates/service-values.yaml.j2 new file mode 100644 index 0000000000..6505c5e2be --- /dev/null +++ b/roles/ci_dcn_site/templates/service-values.yaml.j2 @@ -0,0 +1,160 @@ +--- +# source: dcn/service-values.yaml.j2 +apiVersion: v1 +kind: ConfigMap +metadata: + name: service-values + 
annotations: + config.kubernetes.io/local-config: "true" +data: + preserveJobs: false + cinderAPI: + replicas: 3 + customServiceConfig: | + [DEFAULT] + default_availability_zone = az0 + cinderBackup: + replicas: 3 + customServiceConfig: | + [DEFAULT] + backup_driver = cinder.backup.drivers.ceph.CephBackupDriver + backup_ceph_pool = backups + backup_ceph_user = openstack + cinderVolumes: +{% for _ceph in _ceph_vars_list %} + {{ _ceph.cifmw_ceph_client_cluster }}: + customServiceConfig: | + [DEFAULT] + enabled_backends = ceph +{% if 'az0' not in _ceph.cifmw_ceph_client_cluster %} + glance_api_servers = https://glance-{{ _ceph.cifmw_ceph_client_cluster }}-internal.openstack.svc:9292 +{% endif %} + [ceph] + volume_backend_name = ceph + volume_driver = cinder.volume.drivers.rbd.RBDDriver + rbd_ceph_conf = /etc/ceph/{{ _ceph.cifmw_ceph_client_cluster }}.conf + rbd_user = openstack + rbd_pool = volumes + rbd_flatten_volume_from_snapshot = False + rbd_secret_uuid = {{ _ceph.cifmw_ceph_client_fsid }} + rbd_cluster_name = {{ _ceph.cifmw_ceph_client_cluster }} + backend_availability_zone = {{ _ceph.cifmw_ceph_client_cluster }} +{% endfor %} + glance: + customServiceConfig: | + [DEFAULT] + enabled_backends = default_backend:rbd + [glance_store] + default_backend = default_backend + [default_backend] + rbd_store_ceph_conf = /etc/ceph/az0.conf + store_description = "RBD backend" + rbd_store_pool = images + rbd_store_user = openstack + rbd_thin_provisioning = True + glanceAPIs: +{% set backends = [] %} +{% for _ceph in _ceph_vars_list %} +{% if _ceph.cifmw_ceph_client_cluster not in backends %} +{% set _ = backends.append(_ceph.cifmw_ceph_client_cluster + ':rbd') %} +{% endif %} +{% endfor %} +{% for _ceph in _ceph_vars_list %} +{% if 'az0' in _ceph.cifmw_ceph_client_cluster %} + default: +{% else %} + {{ _ceph.cifmw_ceph_client_cluster }}: +{% endif %} + customServiceConfig: | + [DEFAULT] + enabled_import_methods = [web-download,copy-image,glance-direct] + enabled_backends = {{ 
backends | join(',') }} + [glance_store] + default_backend = {{ _ceph.cifmw_ceph_client_cluster }} + [{{ _ceph.cifmw_ceph_client_cluster }}] + rbd_store_ceph_conf = /etc/ceph/{{ _ceph.cifmw_ceph_client_cluster }}.conf + store_description = "{{ _ceph.cifmw_ceph_client_cluster }} RBD backend" + rbd_store_pool = images + rbd_store_user = openstack + rbd_thin_provisioning = True +{% for _ceph_az in _ceph_vars_list %} +{% if _ceph_az.cifmw_ceph_client_cluster != _ceph.cifmw_ceph_client_cluster %} + [{{ _ceph_az.cifmw_ceph_client_cluster }}] + rbd_store_ceph_conf = /etc/ceph/{{ _ceph_az.cifmw_ceph_client_cluster }}.conf + store_description = "{{ _ceph_az.cifmw_ceph_client_cluster }} RBD backend" + rbd_store_pool = images + rbd_store_user = openstack + rbd_thin_provisioning = True +{% endif %} +{% endfor %} + networkAttachments: + - storage + override: + service: + internal: + metadata: + annotations: + metallb.universe.tf/address-pool: internalapi + metallb.universe.tf/allow-shared-ip: internalapi + metallb.universe.tf/loadBalancerIPs: 172.17.0.8{{ loop.index0 }} + spec: + type: LoadBalancer + replicas: 3 +{% if _ceph.cifmw_ceph_client_cluster == 'az0' %} + type: split +{% else %} + type: edge +{% endif %} +{% endfor %} + manila: + enabled: false + manilaAPI: + customServiceConfig: | + [DEFAULT] + enabled_share_protocols=nfs,cephfs + manilaShares: + share1: + customServiceConfig: | + [DEFAULT] + enabled_share_backends = cephfs + enabled_share_protocols = cephfs + [cephfs] + driver_handles_share_servers = False + share_backend_name = cephfs + share_driver = manila.share.drivers.cephfs.driver.CephFSDriver + cephfs_conf_path = /etc/ceph/ceph.conf + cephfs_cluster_name = ceph + cephfs_auth_id=openstack + cephfs_volume_mode = 0755 + cephfs_protocol_helper_type = CEPHFS + neutron: + template: + customServiceConfig: | + [ml2_type_vlan] + network_vlan_ranges = datacentre:1:1000,leaf1:1:1000,leaf2:1:1000 + [neutron] + physnets = datacentre,leaf1,leaf2 + nova: + 
customServiceConfig: | + [DEFAULT] + default_schedule_zone=az0 + extraMounts: + - name: v1 + region: r1 + extraVol: + - propagation: + - CinderVolume + - CinderBackup + - GlanceAPI + - ManilaShare + extraVolType: Ceph + volumes: + - name: ceph + projected: + sources: + - secret: + name: ceph-conf-files + mounts: + - name: ceph + mountPath: /etc/ceph + readOnly: true diff --git a/roles/ci_dcn_site/templates/values.yaml.j2 b/roles/ci_dcn_site/templates/values.yaml.j2 new file mode 100644 index 0000000000..440d26096a --- /dev/null +++ b/roles/ci_dcn_site/templates/values.yaml.j2 @@ -0,0 +1,63 @@ +--- +# source: dcn/values.yaml.j2 +apiVersion: v1 +data: + customDataplanService: + name: nova-custom-ceph-{{ _az }} + nodeset_name: {{ _group_name }}-edpm + ceph_conf: +{% for _file in _ceph_files %} + {{ _file | basename }}: {{ lookup('file', _file, rstrip=False) | b64encode }} +{% endfor %} + nodeset: + services: + - bootstrap + - configure-network + - validate-network + - install-os + - ceph-hci-pre + - configure-os + - ssh-known-hosts + - run-os + - reboot-os + - install-certs + - ceph-client + - ovn + - neutron-metadata + - libvirt + - nova-custom-ceph-{{ _az }} + nova: + ceph: + conf: | + [libvirt] + images_type=rbd + images_rbd_pool=vms + images_rbd_ceph_conf=/etc/ceph/{{ cifmw_ceph_client_cluster }}.conf + images_rbd_glance_store_name={{ cifmw_ceph_client_cluster }} + images_rbd_glance_copy_poll_interval=15 + images_rbd_glance_copy_timeout=600 + rbd_user=openstack + rbd_secret_uuid={{ cifmw_ceph_client_fsid }} + [glance] +{% if 'az0' in cifmw_ceph_client_cluster %} + endpoint_override = https://glance-default-internal.openstack.svc:9292 +{% else %} + endpoint_override = https://glance-{{ _az }}-internal.openstack.svc:9292 +{% endif %} + valid_interfaces = internal + [cinder] + cross_az_attach = False + catalog_info = volumev3:cinderv3:internalURL + name: ceph-nova-{{ _az }} + dataSources: + - configMapRef: + name: ceph-nova-{{ _az }} + - secretRef: + name: 
nova-cell1-compute-config + - secretRef: + name: nova-migration-ssh-key +kind: ConfigMap +metadata: + annotations: + config.kubernetes.io/local-config: 'true' + name: edpm-nodeset-values-post-ceph diff --git a/scenarios/reproducers/va-dcn.yml b/scenarios/reproducers/dt-dcn.yml similarity index 69% rename from scenarios/reproducers/va-dcn.yml rename to scenarios/reproducers/dt-dcn.yml index 3b47c2d0ca..6e4294dbe0 100644 --- a/scenarios/reproducers/va-dcn.yml +++ b/scenarios/reproducers/dt-dcn.yml @@ -120,7 +120,7 @@ cifmw_libvirt_manager_configuration: memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}" cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}" extra_disks_num: 3 - extra_disks_size: 30G + extra_disks_size: 15G nets: - ocpbm - osp_trunk @@ -145,6 +145,8 @@ cifmw_libvirt_manager_configuration: sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" disk_file_name: "base-os.qcow2" + extra_disks_num: 3 + extra_disks_size: 15G disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}" memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}" cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}" @@ -159,6 +161,8 @@ cifmw_libvirt_manager_configuration: sha256_image_name: "{{ cifmw_discovered_hash }}" image_local_dir: "{{ cifmw_basedir }}/images/" disk_file_name: "base-os.qcow2" + extra_disks_num: 3 + extra_disks_size: 15G disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}" memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}" cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}" @@ -232,6 +236,7 @@ cifmw_networking_definition: end: 90 internalapi: network: "172.17.0.0/24" + gateway: "172.17.0.1" vlan: 20 mtu: 1496 tools: @@ -249,6 +254,7 @@ cifmw_networking_definition: end: 70 internalapidcn1: network: "172.17.10.0/24" + gateway: "172.17.10.1" vlan: 30 mtu: 1496 tools: @@ -266,6 +272,7 @@ cifmw_networking_definition: 
end: 70 internalapidcn2: network: "172.17.20.0/24" + gateway: "172.17.20.1" vlan: 40 mtu: 1496 tools: @@ -283,6 +290,7 @@ cifmw_networking_definition: end: 70 storage: network: "172.18.0.0/24" + gateway: "172.18.0.1" vlan: 21 mtu: 1496 tools: @@ -300,6 +308,7 @@ cifmw_networking_definition: end: 70 storagedcn1: network: "172.18.10.0/24" + gateway: "172.18.10.1" vlan: 31 mtu: 1496 tools: @@ -317,6 +326,7 @@ cifmw_networking_definition: end: 70 storagedcn2: network: "172.18.20.0/24" + gateway: "172.18.20.1" vlan: 41 mtu: 1496 tools: @@ -334,6 +344,7 @@ cifmw_networking_definition: end: 70 storagemgmt: network: "172.20.0.0/24" + gateway: "172.20.0.1" vlan: 23 tools: netconfig: @@ -344,6 +355,7 @@ cifmw_networking_definition: mtu: 1500 storagemgmtdcn1: network: "172.20.10.0/24" + gateway: "172.20.10.1" vlan: 33 mtu: 1500 tools: @@ -353,6 +365,7 @@ cifmw_networking_definition: end: 250 storagemgmtdcn2: network: "172.20.20.0/24" + gateway: "172.20.20.1" vlan: 43 mtu: 1500 tools: @@ -362,6 +375,7 @@ cifmw_networking_definition: end: 250 tenant: network: "172.19.0.0/24" + gateway: "172.19.0.1" tools: metallb: ranges: @@ -379,6 +393,7 @@ cifmw_networking_definition: mtu: 1496 tenantdcn1: network: "172.19.10.0/24" + gateway: "172.19.10.1" vlan: 32 mtu: 1496 tools: @@ -396,6 +411,7 @@ cifmw_networking_definition: end: 70 tenantdcn2: network: "172.19.20.0/24" + gateway: "172.19.20.1" vlan: 42 mtu: 1496 tools: @@ -484,3 +500,122 @@ cifmw_networking_definition: networks: ctlplane: ip: "192.168.122.9" +cifmw_libvirt_manager_extra_network_configuration: + interfaces: + - name: "vlan{{ cifmw_networking_definition.networks.internalapi.vlan }}" + type: vlan + state: up + vlan: + base-iface: cifmw-osp_trunk + id: "{{ cifmw_networking_definition.networks.internalapi.vlan }}" + protocol: 802.1q + ipv4: + enabled: true + dhcp: false + address: + - ip: "{{ cifmw_networking_definition.networks.internalapi.gateway }}" + prefix-length: "{{ 
cifmw_networking_definition.networks.internalapi.network | ansible.utils.ipaddr('prefix') }}" + - name: "vlan{{ cifmw_networking_definition.networks.internalapidcn1.vlan }}" + type: vlan + state: up + vlan: + base-iface: cifmw-dcn1_tr + id: "{{ cifmw_networking_definition.networks.internalapidcn1.vlan }}" + protocol: 802.1q + ipv4: + enabled: true + dhcp: false + address: + - ip: "{{ cifmw_networking_definition.networks.internalapidcn1.gateway }}" + prefix-length: "{{ cifmw_networking_definition.networks.internalapidcn1.network | ansible.utils.ipaddr('prefix') }}" + - name: "vlan{{ cifmw_networking_definition.networks.internalapidcn2.vlan }}" + type: vlan + state: up + vlan: + base-iface: cifmw-dcn2_tr + id: "{{ cifmw_networking_definition.networks.internalapidcn2.vlan }}" + protocol: 802.1q + ipv4: + enabled: true + dhcp: false + address: + - ip: "{{ cifmw_networking_definition.networks.internalapidcn2.gateway }}" + prefix-length: "{{ cifmw_networking_definition.networks.internalapidcn2.network | ansible.utils.ipaddr('prefix') }}" + - name: "vlan{{ cifmw_networking_definition.networks.storage.vlan }}" + type: vlan + state: up + vlan: + base-iface: cifmw-osp_trunk + id: "{{ cifmw_networking_definition.networks.storage.vlan }}" + protocol: 802.1q + ipv4: + enabled: true + dhcp: false + address: + - ip: "{{ cifmw_networking_definition.networks.storage.gateway }}" + prefix-length: "{{ cifmw_networking_definition.networks.storage.network | ansible.utils.ipaddr('prefix') }}" + - name: "vlan{{ cifmw_networking_definition.networks.storagedcn1.vlan }}" + type: vlan + state: up + vlan: + base-iface: cifmw-dcn1_tr + id: "{{ cifmw_networking_definition.networks.storagedcn1.vlan }}" + protocol: 802.1q + ipv4: + enabled: true + dhcp: false + address: + - ip: "{{ cifmw_networking_definition.networks.storagedcn1.gateway }}" + prefix-length: "{{ cifmw_networking_definition.networks.storagedcn1.network | ansible.utils.ipaddr('prefix') }}" + - name: "vlan{{ 
cifmw_networking_definition.networks.storagedcn2.vlan }}"
+      type: vlan
+      state: up
+      vlan:
+        base-iface: cifmw-dcn2_tr
+        id: "{{ cifmw_networking_definition.networks.storagedcn2.vlan }}"
+        protocol: 802.1q
+      ipv4:
+        enabled: true
+        dhcp: false
+        address:
+          - ip: "{{ cifmw_networking_definition.networks.storagedcn2.gateway }}"
+            prefix-length: "{{ cifmw_networking_definition.networks.storagedcn2.network | ansible.utils.ipaddr('prefix') }}"
+    - name: "vlan{{ cifmw_networking_definition.networks.tenant.vlan }}"
+      type: vlan
+      state: up
+      vlan:
+        base-iface: cifmw-osp_trunk
+        id: "{{ cifmw_networking_definition.networks.tenant.vlan }}"
+        protocol: 802.1q
+      ipv4:
+        enabled: true
+        dhcp: false
+        address:
+          - ip: "{{ cifmw_networking_definition.networks.tenant.gateway }}"
+            prefix-length: "{{ cifmw_networking_definition.networks.tenant.network | ansible.utils.ipaddr('prefix') }}"
+    - name: "vlan{{ cifmw_networking_definition.networks.tenantdcn1.vlan }}"
+      type: vlan
+      state: up
+      vlan:
+        base-iface: cifmw-dcn1_tr
+        id: "{{ cifmw_networking_definition.networks.tenantdcn1.vlan }}"
+        protocol: 802.1q
+      ipv4:
+        enabled: true
+        dhcp: false
+        address:
+          - ip: "{{ cifmw_networking_definition.networks.tenantdcn1.gateway }}"
+            prefix-length: "{{ cifmw_networking_definition.networks.tenantdcn1.network | ansible.utils.ipaddr('prefix') }}"
+    - name: "vlan{{ cifmw_networking_definition.networks.tenantdcn2.vlan }}"
+      type: vlan
+      state: up
+      vlan:
+        base-iface: cifmw-dcn2_tr
+        id: "{{ cifmw_networking_definition.networks.tenantdcn2.vlan }}"
+        protocol: 802.1q
+      ipv4:
+        enabled: true
+        dhcp: false
+        address:
+          - ip: "{{ cifmw_networking_definition.networks.tenantdcn2.gateway }}"
+            prefix-length: "{{ cifmw_networking_definition.networks.tenantdcn2.network | ansible.utils.ipaddr('prefix') }}"