From b52941fccbf2d1a271cce3539e47158f64ae2cea Mon Sep 17 00:00:00 2001
From: Guillaume Abrioux
Date: Wed, 14 Feb 2024 11:14:02 +0100
Subject: [PATCH] address Ansible linter errors

This addresses all errors reported by the Ansible linter.

Signed-off-by: Guillaume Abrioux
---
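Note: the bulk of this patch is mechanical. Tasks gain capitalized names,
modules are called by their fully qualified collection names (e.g.
ansible.builtin.import_role, ansible.builtin.set_stats), booleans use
lowercase true/false, and file-creating tasks get an explicit mode. A
representative before/after, taken from the dashboard.yml hunk below:

    # before
    - import_role:
        name: ceph-defaults
      tags: ['ceph_update_config']

    # after
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults
      tags: ['ceph_update_config']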
 .github/workflows/ansible-lint.yml | 9 +-
 dashboard.yml | 90 +-
 group_vars/all.yml.sample | 65 +-
 group_vars/clients.yml.sample | 4 +-
 group_vars/iscsigws.yml.sample | 6 +-
 group_vars/mdss.yml.sample | 6 +-
 group_vars/mgrs.yml.sample | 6 +-
 group_vars/mons.yml.sample | 6 +-
 group_vars/nfss.yml.sample | 28 +-
 group_vars/osds.yml.sample | 36 +-
 group_vars/rbdmirrors.yml.sample | 6 +-
 group_vars/rgwloadbalancers.yml.sample | 10 +-
 group_vars/rgws.yml.sample | 58 +-
 group_vars/rhcs.yml.sample | 63 +-
 infrastructure-playbooks/add-mon.yml | 90 +-
 .../backup-and-restore-ceph-files.yml | 47 +-
 infrastructure-playbooks/ceph-keys.yml | 21 +-
 infrastructure-playbooks/cephadm-adopt.yml | 833 +++++++--------
 infrastructure-playbooks/cephadm.yml | 213 ++--
 infrastructure-playbooks/docker-to-podman.yml | 132 ++-
 infrastructure-playbooks/gather-ceph-logs.yml | 40 +-
 infrastructure-playbooks/lv-create.yml | 136 +--
 infrastructure-playbooks/lv-teardown.yml | 173 ++--
 infrastructure-playbooks/purge-cluster.yml | 663 ++++++------
 infrastructure-playbooks/purge-dashboard.yml | 98 +-
 .../purge-iscsi-gateways.yml | 69 +-
 .../rgw-add-users-buckets.yml | 94 +-
 infrastructure-playbooks/rolling_update.yml | 962 +++++++++++-------
 infrastructure-playbooks/shrink-mds.yml | 109 +-
 infrastructure-playbooks/shrink-mgr.yml | 81 +-
 infrastructure-playbooks/shrink-mon.yml | 90 +-
 infrastructure-playbooks/shrink-osd.yml | 172 ++--
 infrastructure-playbooks/shrink-rbdmirror.yml | 70 +-
 infrastructure-playbooks/shrink-rgw.yml | 71 +-
 .../storage-inventory.yml | 23 +-
 ...inerized-to-containerized-ceph-daemons.yml | 458 +++++----
 .../take-over-existing-cluster.yml | 50 +-
 .../untested-by-ci/cluster-maintenance.yml | 8 +-
 .../untested-by-ci/cluster-os-migration.yml | 92 +-
 .../untested-by-ci/make-osd-partitions.yml | 26 +-
 .../untested-by-ci/migrate-journal-to-ssd.yml | 44 +-
 .../untested-by-ci/purge-multisite.yml | 4 +-
 ...recover-osds-after-ssd-journal-failure.yml | 38 +-
 .../untested-by-ci/replace-osd.yml | 86 +-
 roles/ceph-client/defaults/main.yml | 4 +-
 roles/ceph-client/meta/main.yml | 4 +-
 roles/ceph-client/tasks/create_users_keys.yml | 26 +-
 roles/ceph-client/tasks/main.yml | 8 +-
 roles/ceph-client/tasks/pre_requisite.yml | 14 +-
 roles/ceph-common/meta/main.yml | 4 +-
 .../tasks/configure_cluster_name.yml | 31 +-
 .../tasks/configure_memory_allocator.yml | 38 +-
 .../tasks/configure_repository.yml | 28 +-
 .../tasks/create_rbd_client_dir.yml | 4 +-
 ...nfigure_debian_repository_installation.yml | 16 +-
 .../configure_redhat_local_installation.yml | 34 +-
 ...nfigure_redhat_repository_installation.yml | 20 +-
 ...configure_suse_repository_installation.yml | 4 +-
 .../installs/debian_community_repository.yml | 18 +-
 .../installs/debian_custom_repository.yml | 10 +-
 .../tasks/installs/debian_dev_repository.yml | 12 +-
 .../tasks/installs/debian_uca_repository.yml | 10 +-
 .../installs/install_debian_packages.yml | 8 +-
 .../installs/install_debian_rhcs_packages.yml | 6 +-
 .../tasks/installs/install_on_clear.yml | 4 +-
 .../tasks/installs/install_on_debian.yml | 14 +-
 .../installs/install_redhat_packages.yml | 14 +-
 .../tasks/installs/install_suse_packages.yml | 10 +-
 .../prerequisite_rhcs_cdn_install.yml | 4 +-
 .../installs/redhat_community_repository.yml | 20 +-
 .../installs/redhat_custom_repository.yml | 9 +-
 .../tasks/installs/redhat_dev_repository.yml | 17 +-
 .../tasks/installs/redhat_rhcs_repository.yml | 4 +-
 .../tasks/installs/suse_obs_repository.yml | 8 +-
 roles/ceph-common/tasks/main.yml | 56 +-
 roles/ceph-common/tasks/release-rhcs.yml | 36 +-
 roles/ceph-common/tasks/selinux.yml | 10 +-
 roles/ceph-config/meta/main.yml | 4 +-
 .../tasks/create_ceph_initial_dirs.yml | 6 +-
 roles/ceph-config/tasks/main.yml | 85 +-
 .../tasks/rgw_systemd_environment_file.yml | 10 +-
 roles/ceph-container-common/meta/main.yml | 4 +-
 .../tasks/fetch_image.yml | 32 +-
 roles/ceph-container-common/tasks/main.yml | 39 +-
 .../tasks/prerequisites.yml | 36 +-
 .../ceph-container-common/tasks/registry.yml | 8 +-
 roles/ceph-container-common/tasks/release.yml | 36 +-
 roles/ceph-container-engine/meta/main.yml | 4 +-
 roles/ceph-container-engine/tasks/main.yml | 4 +-
 .../pre_requisites/debian_prerequisites.yml | 22 +-
 .../tasks/pre_requisites/prerequisites.yml | 45 +-
 roles/ceph-crash/meta/main.yml | 5 +-
 roles/ceph-crash/tasks/main.yml | 34 +-
 roles/ceph-crash/tasks/systemd.yml | 6 +-
 roles/ceph-dashboard/meta/main.yml | 4 +-
 .../tasks/configure_dashboard.yml | 210 ++--
 .../tasks/configure_grafana_layouts.yml | 8 +-
 roles/ceph-dashboard/tasks/main.yml | 8 +-
 roles/ceph-defaults/defaults/main.yml | 65 +-
 roles/ceph-defaults/meta/main.yml | 4 +-
 roles/ceph-defaults/tasks/main.yml | 2 +-
 roles/ceph-defaults/vars/main.yml | 2 +-
 roles/ceph-facts/meta/main.yml | 4 +-
 roles/ceph-facts/tasks/container_binary.yml | 8 +-
 .../convert_grafana_server_group_name.yml | 8 +-
 roles/ceph-facts/tasks/devices.yml | 46 +-
 roles/ceph-facts/tasks/facts.yml | 167 +--
 .../tasks/get_def_crush_rule_name.yml | 8 +-
 roles/ceph-facts/tasks/grafana.yml | 16 +-
 roles/ceph-facts/tasks/main.yml | 8 +-
 .../ceph-facts/tasks/set_monitor_address.yml | 36 +-
 .../ceph-facts/tasks/set_radosgw_address.yml | 48 +-
 roles/ceph-fetch-keys/defaults/main.yml | 1 -
 roles/ceph-fetch-keys/meta/main.yml | 4 +-
 roles/ceph-fetch-keys/tasks/main.yml | 15 +-
 roles/ceph-grafana/meta/main.yml | 4 +-
 .../ceph-grafana/tasks/configure_grafana.yml | 64 +-
 roles/ceph-grafana/tasks/main.yml | 8 +-
 roles/ceph-grafana/tasks/setup_container.yml | 16 +-
 roles/ceph-grafana/tasks/systemd.yml | 6 +-
 roles/ceph-handler/handlers/main.yml | 102 +-
 roles/ceph-handler/meta/main.yml | 4 +-
 .../tasks/check_running_cluster.yml | 8 +-
 .../tasks/check_running_containers.yml | 68 +-
 .../tasks/check_socket_non_container.yml | 140 +--
 roles/ceph-handler/tasks/handler_crash.yml | 22 +-
 roles/ceph-handler/tasks/handler_mdss.yml | 25 +-
 roles/ceph-handler/tasks/handler_mgrs.yml | 25 +-
 roles/ceph-handler/tasks/handler_mons.yml | 25 +-
 roles/ceph-handler/tasks/handler_nfss.yml | 25 +-
 roles/ceph-handler/tasks/handler_osds.yml | 57 +-
 .../tasks/handler_rbd_target_api_gw.yml | 36 +-
 .../ceph-handler/tasks/handler_rbdmirrors.yml | 25 +-
 roles/ceph-handler/tasks/handler_rgws.yml | 25 +-
 .../tasks/handler_tcmu_runner.yml | 18 +-
 roles/ceph-handler/tasks/main.yml | 36 +-
 roles/ceph-infra/handlers/main.yml | 18 +-
 roles/ceph-infra/meta/main.yml | 4 +-
 roles/ceph-infra/tasks/configure_firewall.yml | 117 +--
 roles/ceph-infra/tasks/dashboard_firewall.yml | 48 +-
 roles/ceph-infra/tasks/main.yml | 24 +-
 roles/ceph-infra/tasks/setup_ntp.yml | 54 +-
 roles/ceph-iscsi-gw/defaults/main.yml | 6 +-
 roles/ceph-iscsi-gw/meta/main.yml | 4 +-
 roles/ceph-iscsi-gw/tasks/common.yml | 24 +-
 roles/ceph-iscsi-gw/tasks/containerized.yml | 23 +-
 roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml | 62 +-
 roles/ceph-iscsi-gw/tasks/main.yml | 24 +-
 .../tasks/non-container/configure_iscsi.yml | 8 +-
 .../tasks/non-container/postrequisites.yml | 8 +-
 .../tasks/non-container/prerequisites.yml | 57 +-
 roles/ceph-iscsi-gw/tasks/systemd.yml | 8 +-
 roles/ceph-mds/defaults/main.yml | 6 +-
 roles/ceph-mds/meta/main.yml | 4 +-
 roles/ceph-mds/tasks/common.yml | 12 +-
 roles/ceph-mds/tasks/containerized.yml | 26 +-
 .../ceph-mds/tasks/create_mds_filesystems.yml | 7 +-
 roles/ceph-mds/tasks/main.yml | 16 +-
 roles/ceph-mds/tasks/non_containerized.yml | 27 +-
 roles/ceph-mds/tasks/systemd.yml | 13 +-
 roles/ceph-mgr/defaults/main.yml | 6 +-
 roles/ceph-mgr/meta/main.yml | 4 +-
 roles/ceph-mgr/tasks/common.yml | 30 +-
 roles/ceph-mgr/tasks/main.yml | 20 +-
 roles/ceph-mgr/tasks/mgr_modules.yml | 22 +-
 roles/ceph-mgr/tasks/pre_requisite.yml | 26 +-
 roles/ceph-mgr/tasks/start_mgr.yml | 29 +-
 roles/ceph-mgr/tasks/systemd.yml | 13 +-
 roles/ceph-mon/defaults/main.yml | 6 +-
 roles/ceph-mon/meta/main.yml | 4 +-
 roles/ceph-mon/tasks/ceph_keys.yml | 6 +-
 roles/ceph-mon/tasks/deploy_monitors.yml | 77 +-
 roles/ceph-mon/tasks/main.yml | 20 +-
 roles/ceph-mon/tasks/secure_cluster.yml | 12 +-
 roles/ceph-mon/tasks/start_monitor.yml | 21 +-
 roles/ceph-mon/tasks/systemd.yml | 21 +-
 roles/ceph-nfs/defaults/main.yml | 28 +-
 roles/ceph-nfs/meta/main.yml | 4 +-
 roles/ceph-nfs/tasks/create_rgw_nfs_user.yml | 6 +-
 roles/ceph-nfs/tasks/main.yml | 50 +-
 .../tasks/pre_requisite_container.yml | 53 +-
 .../tasks/pre_requisite_non_container.yml | 32 +-
 .../pre_requisite_non_container_debian.yml | 79 +-
 .../pre_requisite_non_container_red_hat.yml | 35 +-
 roles/ceph-nfs/tasks/start_nfs.yml | 102 +-
 roles/ceph-nfs/tasks/systemd.yml | 6 +-
 roles/ceph-nfs/templates/systemd-run.j2 | 27 +
 roles/ceph-node-exporter/meta/main.yml | 4 +-
 roles/ceph-node-exporter/tasks/main.yml | 4 +-
 .../tasks/setup_container.yml | 12 +-
 roles/ceph-node-exporter/tasks/systemd.yml | 6 +-
 roles/ceph-osd/defaults/main.yml | 36 +-
 roles/ceph-osd/meta/main.yml | 4 +-
 roles/ceph-osd/tasks/common.yml | 11 +-
 roles/ceph-osd/tasks/crush_rules.yml | 19 +-
 roles/ceph-osd/tasks/main.yml | 54 +-
 roles/ceph-osd/tasks/openstack_config.yml | 20 +-
 roles/ceph-osd/tasks/scenarios/lvm-batch.yml | 6 +-
 roles/ceph-osd/tasks/scenarios/lvm.yml | 14 +-
 roles/ceph-osd/tasks/start_osds.yml | 35 +-
 roles/ceph-osd/tasks/system_tuning.yml | 24 +-
 roles/ceph-osd/tasks/systemd.yml | 21 +-
 .../ceph-prometheus/files/ceph_dashboard.yml | 228 ++---
 roles/ceph-prometheus/handlers/main.yml | 4 +-
 roles/ceph-prometheus/meta/main.yml | 4 +-
 roles/ceph-prometheus/tasks/main.yml | 45 +-
 .../ceph-prometheus/tasks/setup_container.yml | 8 +-
 roles/ceph-prometheus/tasks/systemd.yml | 8 +-
 roles/ceph-rbd-mirror/defaults/main.yml | 6 +-
 roles/ceph-rbd-mirror/meta/main.yml | 4 +-
 .../tasks/configure_mirroring.yml | 62 +-
 roles/ceph-rbd-mirror/tasks/main.yml | 33 +-
 .../tasks/start_container_rbd_mirror.yml | 14 +-
 roles/ceph-rbd-mirror/tasks/systemd.yml | 21 +-
 roles/ceph-rgw-loadbalancer/defaults/main.yml | 10 +-
 roles/ceph-rgw-loadbalancer/handlers/main.yml | 8 +-
 roles/ceph-rgw-loadbalancer/meta/main.yml | 4 +-
 roles/ceph-rgw-loadbalancer/tasks/main.yml | 8 +-
 .../tasks/pre_requisite.yml | 34 +-
 .../tasks/start_rgw_loadbalancer.yml | 12 +-
 roles/ceph-rgw/defaults/main.yml | 58 +-
 roles/ceph-rgw/handlers/main.yml | 4 +-
 roles/ceph-rgw/meta/main.yml | 4 +-
 roles/ceph-rgw/tasks/common.yml | 18 +-
 roles/ceph-rgw/tasks/main.yml | 24 +-
 roles/ceph-rgw/tasks/openstack-keystone.yml | 18 +-
roles/ceph-rgw/tasks/pre_requisite.yml | 24 +- roles/ceph-rgw/tasks/rgw_create_pools.yml | 8 +- roles/ceph-rgw/tasks/start_docker_rgw.yml | 14 +- roles/ceph-rgw/tasks/start_radosgw.yml | 23 +- roles/ceph-rgw/tasks/systemd.yml | 21 +- roles/ceph-validate/meta/main.yml | 4 +- roles/ceph-validate/tasks/check_devices.yml | 61 +- roles/ceph-validate/tasks/check_eth_mon.yml | 16 +- roles/ceph-validate/tasks/check_eth_rgw.yml | 16 +- .../ceph-validate/tasks/check_ipaddr_mon.yml | 4 +- roles/ceph-validate/tasks/check_iscsi.yml | 22 +- roles/ceph-validate/tasks/check_nfs.yml | 8 +- roles/ceph-validate/tasks/check_pools.yml | 4 +- roles/ceph-validate/tasks/check_rbdmirror.yml | 10 +- .../ceph-validate/tasks/check_repository.yml | 12 +- .../tasks/check_rgw_multisite.yml | 41 +- roles/ceph-validate/tasks/check_rgw_pools.yml | 12 +- roles/ceph-validate/tasks/check_system.yml | 42 +- roles/ceph-validate/tasks/main.yml | 133 +-- 245 files changed, 5474 insertions(+), 4931 deletions(-) create mode 100644 roles/ceph-nfs/templates/systemd-run.j2 diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml index 6e833ed676..e8fa646cb8 100644 --- a/.github/workflows/ansible-lint.yml +++ b/.github/workflows/ansible-lint.yml @@ -10,10 +10,7 @@ jobs: with: python-version: '3.10' architecture: x64 - - run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint==6.16.0 netaddr + - run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint netaddr - run: ansible-galaxy install -r requirements.yml - - run: ansible-lint -x 106,204,205,208 -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site-container.yml.sample site-container.yml.sample dashboard.yml - - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample --syntax-check --list-tasks -vv - - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site-container.yml.sample --syntax-check --list-tasks -vv - - run: ansible-playbook -i ./tests/functional/all_daemons/hosts dashboard.yml --syntax-check --list-tasks -vv - - run: ansible-playbook -i ./tests/functional/all_daemons/hosts infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv + - run: ansible-lint -x 'yaml[line-length],role-name,run-once' -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site-container.yml.sample site.yml.sample dashboard.yml + - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample site-container.yml.sample dashboard.yml infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv diff --git a/dashboard.yml b/dashboard.yml index 13624728af..e022fa81d9 100644 --- a/dashboard.yml +++ b/dashboard.yml @@ -1,5 +1,6 @@ --- -- hosts: +- name: Deploy node_exporter + hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" - "{{ mds_group_name|default('mdss') }}" @@ -12,75 +13,91 @@ gather_facts: false become: true pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults tags: ['ceph_update_config'] - - name: set ceph node exporter install 'In Progress' + - name: Set ceph node exporter install 'In Progress' run_once: true - set_stats: + ansible.builtin.set_stats: data: installer_phase_ceph_node_exporter: status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" tasks: - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tags: ['ceph_update_config'] - - import_role: + + - name: Import ceph-container-engine + 
ansible.builtin.import_role: name: ceph-container-engine - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common tasks_from: registry when: - not containerized_deployment | bool - ceph_docker_registry_auth | bool - - import_role: + + - name: Import ceph-node-exporter role + ansible.builtin.import_role: name: ceph-node-exporter post_tasks: - - name: set ceph node exporter install 'Complete' + - name: Set ceph node exporter install 'Complete' run_once: true - set_stats: + ansible.builtin.set_stats: data: installer_phase_ceph_node_exporter: status: "Complete" end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" -- hosts: "{{ monitoring_group_name | default('monitoring') }}" +- name: Deploy grafana and prometheus + hosts: "{{ monitoring_group_name | default('monitoring') }}" gather_facts: false become: true pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults tags: ['ceph_update_config'] - - name: set ceph grafana install 'In Progress' + - name: Set ceph grafana install 'In Progress' run_once: true - set_stats: + ansible.builtin.set_stats: data: installer_phase_ceph_grafana: status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" tasks: - - import_role: - name: ceph-facts - tags: ['ceph_update_config'] - - import_role: + # - ansible.builtin.import_role: + # name: ceph-facts + # tags: ['ceph_update_config'] + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: grafana tags: ['ceph_update_config'] - - import_role: + + - name: Import ceph-prometheus role + ansible.builtin.import_role: name: ceph-prometheus - - import_role: + + - name: Import ceph-grafana role + ansible.builtin.import_role: name: ceph-grafana post_tasks: - - name: set ceph grafana install 'Complete' + - name: Set ceph grafana install 'Complete' run_once: true - set_stats: + ansible.builtin.set_stats: data: installer_phase_ceph_grafana: status: "Complete" @@ -88,37 +105,44 @@ # using groups[] here otherwise it can't fallback to the mon if there's no mgr group. 
# adding an additional | default(omit) in case where no monitors are present (external ceph cluster) -- hosts: "{{ groups[mgr_group_name|default('mgrs')] | default(groups[mon_group_name|default('mons')]) | default(omit) }}" +- name: Deploy dashboard + hosts: "{{ groups['mgrs'] | default(groups['mons']) | default(omit) }}" gather_facts: false become: true pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults tags: ['ceph_update_config'] - - name: set ceph dashboard install 'In Progress' + - name: Set ceph dashboard install 'In Progress' run_once: true - set_stats: + ansible.builtin.set_stats: data: installer_phase_ceph_dashboard: status: "In Progress" start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}" tasks: - - import_role: - name: ceph-facts - tags: ['ceph_update_config'] - - import_role: + # - name: Import ceph-facts role + # ansible.builtin.import_role: + # name: ceph-facts + # tags: ['ceph_update_config'] + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: grafana tags: ['ceph_update_config'] - - import_role: + + - name: Import ceph-dashboard role + ansible.builtin.import_role: name: ceph-dashboard post_tasks: - - name: set ceph dashboard install 'Complete' + - name: Set ceph dashboard install 'Complete' run_once: true - set_stats: + ansible.builtin.set_stats: data: installer_phase_ceph_dashboard: status: "Complete" diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index 3dc73f6a33..22ba3fe6db 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -74,7 +74,7 @@ dummy: # If configure_firewall is true, then ansible will try to configure the # appropriate firewalling rules so that Ceph daemons can communicate # with each others. -#configure_firewall: True +#configure_firewall: true # Open ports on corresponding nodes if firewall is installed on it #ceph_mon_firewall_zone: public @@ -120,7 +120,7 @@ dummy: # This variable determines if ceph packages can be updated. If False, the # package resources will use "state=present". If True, they will use # "state=latest". -#upgrade_ceph_packages: False +#upgrade_ceph_packages: false #ceph_use_distro_backports: false # DEBIAN ONLY #ceph_directories_mode: "0755" @@ -171,7 +171,7 @@ dummy: # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ # for more info read: https://github.com/ceph/ceph-ansible/issues/305 -#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}" +# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}" # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0) @@ -229,7 +229,7 @@ dummy: # a URL to the .repo file to be installed on the targets. For deb, # ceph_custom_repo should be the URL to the repo base. 
# -#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc +# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc #ceph_custom_repo: https://server.domain.com/ceph-custom-repo @@ -238,14 +238,14 @@ dummy: # Enabled when ceph_repository == 'local' # # Path to DESTDIR of the ceph install -#ceph_installation_dir: "/path/to/ceph_installation/" +# ceph_installation_dir: "/path/to/ceph_installation/" # Whether or not to use installer script rundep_installer.sh # This script takes in rundep and installs the packages line by line onto the machine # If this is set to false then it is assumed that the machine ceph is being copied onto will already have # all runtime dependencies installed -#use_installer: false +# use_installer: false # Root directory for ceph-ansible -#ansible_dir: "/path/to/ceph-ansible" +# ansible_dir: "/path/to/ceph-ansible" ###################### @@ -328,12 +328,12 @@ dummy: #ip_version: ipv4 #mon_host_v1: -# enabled: True +# enabled: true # suffix: ':6789' #mon_host_v2: # suffix: ':3300' -#enable_ceph_volume_debug: False +#enable_ceph_volume_debug: false ########## # CEPHFS # @@ -405,7 +405,7 @@ dummy: ## Testing mode # enable this mode _only_ when you have a single node # if you don't want it keep the option commented -#common_single_host_mode: true +# common_single_host_mode: true ## Handlers - restarting daemons after a config change # if for whatever reasons the content of your ceph configuration changes @@ -527,16 +527,16 @@ dummy: #ceph_docker_image_tag: latest-main #ceph_docker_registry: quay.io #ceph_docker_registry_auth: false -#ceph_docker_registry_username: -#ceph_docker_registry_password: -#ceph_docker_http_proxy: -#ceph_docker_https_proxy: +# ceph_docker_registry_username: +# ceph_docker_registry_password: +# ceph_docker_http_proxy: +# ceph_docker_https_proxy: #ceph_docker_no_proxy: "localhost,127.0.0.1" ## Client only docker image - defaults to {{ ceph_docker_image }} #ceph_client_docker_image: "{{ ceph_docker_image }}" #ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}" #ceph_client_docker_registry: "{{ ceph_docker_registry }}" -#containerized_deployment: False +#containerized_deployment: false #container_binary: #timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}" @@ -563,7 +563,7 @@ dummy: # name: "images" # rule_name: "my_replicated_rule" # application: "rbd" -# pg_autoscale_mode: False +# pg_autoscale_mode: false # pg_num: 16 # pgp_num: 16 # target_size_ratio: 0.2 @@ -613,7 +613,7 @@ dummy: ############# # DASHBOARD # ############# -#dashboard_enabled: True +#dashboard_enabled: true # Choose http or https # For https, you should set dashboard.crt/key and grafana.crt/key # If you define the dashboard_crt and dashboard_key variables, but leave them as '', @@ -625,7 +625,7 @@ dummy: #dashboard_admin_user: admin #dashboard_admin_user_ro: false # This variable must be set with a strong custom password when dashboard_enabled is True -#dashboard_admin_password: p@ssw0rd +# dashboard_admin_password: p@ssw0rd # We only need this for SSL (https) connections #dashboard_crt: '' #dashboard_key: '' @@ -634,7 +634,7 @@ dummy: #dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}" #dashboard_rgw_api_user_id: ceph-dashboard #dashboard_rgw_api_admin_resource: '' -#dashboard_rgw_api_no_ssl_verify: False 
+#dashboard_rgw_api_no_ssl_verify: false #dashboard_frontend_vip: '' #dashboard_disabled_features: [] #prometheus_frontend_vip: '' @@ -643,7 +643,7 @@ dummy: #node_exporter_port: 9100 #grafana_admin_user: admin # This variable must be set with a strong custom password when dashboard_enabled is True -#grafana_admin_password: admin +# grafana_admin_password: admin # We only need this for SSL (https) connections #grafana_crt: '' #grafana_key: '' @@ -675,7 +675,7 @@ dummy: #grafana_plugins: # - vonage-status-panel # - grafana-piechart-panel -#grafana_allow_embedding: True +#grafana_allow_embedding: true #grafana_port: 3000 #grafana_network: "{{ public_network }}" #grafana_conf_overrides: {} @@ -691,7 +691,7 @@ dummy: #prometheus_conf_overrides: {} # Uncomment out this variable if you need to customize the retention period for prometheus storage. # set it to '30d' if you want to retain 30 days of data. -#prometheus_storage_tsdb_retention_time: 15d +# prometheus_storage_tsdb_retention_time: 15d #alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2" #alertmanager_container_cpu_period: 100000 #alertmanager_container_cpu_cores: 2 @@ -749,11 +749,11 @@ dummy: # # Example: # -#rbd_devices: -# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' } -# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' } -# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' } -# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' } +# rbd_devices: +# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' } +# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' } +# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' } +# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' } #rbd_devices: {} # client_connections defines the client ACL's to restrict client access to specific LUNs @@ -767,20 +767,19 @@ dummy: # # Example: # -#client_connections: -# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' } -# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' } +# client_connections: +# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' } +# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' } #client_connections: {} -#no_log_on_ceph_key_tasks: True +#no_log_on_ceph_key_tasks: true ############### # DEPRECATION # ############### - ###################################################### # VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER # # *DO NOT* MODIFY THEM # @@ -788,5 +787,5 @@ dummy: #container_exec_cmd: #docker: false -#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}" +#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}" diff --git a/group_vars/clients.yml.sample b/group_vars/clients.yml.sample index 03aee9076f..f358a402a4 100644 --- a/group_vars/clients.yml.sample +++ b/group_vars/clients.yml.sample @@ -45,6 +45,6 @@ dummy: # - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ... 
#keys: -# - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" } -# - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" } +# - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" } +# - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" } diff --git a/group_vars/iscsigws.yml.sample b/group_vars/iscsigws.yml.sample index 67c63a9fa3..59ab5e1917 100644 --- a/group_vars/iscsigws.yml.sample +++ b/group_vars/iscsigws.yml.sample @@ -13,13 +13,13 @@ dummy: # GENERAL # ########### # Whether or not to generate secure certificate to iSCSI gateway nodes -#generate_crt: False +#generate_crt: false #iscsi_conf_overrides: {} #iscsi_pool_name: rbd -#iscsi_pool_size: 3 +# iscsi_pool_size: 3 -#copy_admin_key: True +#copy_admin_key: true ################## # RBD-TARGET-API # diff --git a/group_vars/mdss.yml.sample b/group_vars/mdss.yml.sample index 14b1bfb8ac..592421b5c1 100644 --- a/group_vars/mdss.yml.sample +++ b/group_vars/mdss.yml.sample @@ -43,7 +43,7 @@ dummy: # ceph_mds_systemd_overrides will override the systemd settings # for the ceph-mds services. # For example,to set "PrivateDevices=false" you can specify: -#ceph_mds_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_mds_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/group_vars/mgrs.yml.sample b/group_vars/mgrs.yml.sample index 4a1d64f058..252a5a6a6f 100644 --- a/group_vars/mgrs.yml.sample +++ b/group_vars/mgrs.yml.sample @@ -54,7 +54,7 @@ dummy: # ceph_mgr_systemd_overrides will override the systemd settings # for the ceph-mgr services. # For example,to set "PrivateDevices=false" you can specify: -#ceph_mgr_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_mgr_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/group_vars/mons.yml.sample b/group_vars/mons.yml.sample index ad59172b79..ac66172d09 100644 --- a/group_vars/mons.yml.sample +++ b/group_vars/mons.yml.sample @@ -64,7 +64,7 @@ dummy: # ceph_mon_systemd_overrides will override the systemd settings # for the ceph-mon services. # For example,to set "PrivateDevices=false" you can specify: -#ceph_mon_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_mon_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/group_vars/nfss.yml.sample b/group_vars/nfss.yml.sample index 21ed92e185..1fc46ff1a7 100644 --- a/group_vars/nfss.yml.sample +++ b/group_vars/nfss.yml.sample @@ -92,8 +92,8 @@ dummy: #ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p" # Note: keys are optional and can be generated, but not on containerized, where # they must be configered. 
-#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY" -#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C" +# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY" +# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C" #rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }} ################### @@ -106,19 +106,19 @@ dummy: # https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example # # Example: -#CACHEINODE { -# #Entries_HWMark = 100000; -#} +# CACHEINODE { +# # Entries_HWMark = 100000; +# } # -#ganesha_core_param_overrides: -#ganesha_ceph_export_overrides: -#ganesha_rgw_export_overrides: -#ganesha_rgw_section_overrides: -#ganesha_log_overrides: -#ganesha_conf_overrides: | -# CACHEINODE { -# #Entries_HWMark = 100000; -# } +# ganesha_core_param_overrides: +# ganesha_ceph_export_overrides: +# ganesha_rgw_export_overrides: +# ganesha_rgw_section_overrides: +# ganesha_log_overrides: +# ganesha_conf_overrides: | +# CACHEINODE { +# # Entries_HWMark = 100000; +# } ########## # DOCKER # diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample index a57c1f3f40..694b03ca5b 100644 --- a/group_vars/osds.yml.sample +++ b/group_vars/osds.yml.sample @@ -33,31 +33,31 @@ dummy: # All scenario(except 3rd) inherit from the following device declaration # Note: This scenario uses the ceph-volume lvm batch method to provision OSDs -#devices: -# - /dev/sdb -# - /dev/sdc -# - /dev/sdd -# - /dev/sde +# devices: +# - /dev/sdb +# - /dev/sdc +# - /dev/sdd +# - /dev/sde #devices: [] # Declare devices to be used as block.db devices -#dedicated_devices: -# - /dev/sdx -# - /dev/sdy +# dedicated_devices: +# - /dev/sdx +# - /dev/sdy #dedicated_devices: [] # Declare devices to be used as block.wal devices -#bluestore_wal_devices: -# - /dev/nvme0n1 -# - /dev/nvme0n2 +# bluestore_wal_devices: +# - /dev/nvme0n1 +# - /dev/nvme0n2 #bluestore_wal_devices: [] -#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above. +# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above. # Device discovery is based on the Ansible fact 'ansible_facts["devices"]' # which reports all the devices on a system. If chosen, all the disks # found will be passed to ceph-volume lvm batch. You should not be worried on using @@ -68,7 +68,7 @@ dummy: # Encrypt your OSD device using dmcrypt # If set to True, no matter which osd_objecstore you use the data will be encrypted -#dmcrypt: False +#dmcrypt: true # Use ceph-volume to create OSDs from logical volumes. # lvm_volumes is a list of dictionaries. @@ -177,8 +177,8 @@ dummy: # NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16 # NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17 # then, the following would run the OSD on the first NUMA node only. -#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" -#ceph_osd_docker_cpuset_mems: "0" +# ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" +# ceph_osd_docker_cpuset_mems: "0" # PREPARE DEVICE # @@ -199,9 +199,9 @@ dummy: # ceph_osd_systemd_overrides will override the systemd settings # for the ceph-osd services. 
# For example,to set "PrivateDevices=false" you can specify: -#ceph_osd_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_osd_systemd_overrides: +# Service: +# PrivateDevices: false ########### diff --git a/group_vars/rbdmirrors.yml.sample b/group_vars/rbdmirrors.yml.sample index f355c5ebca..e88f9e99b3 100644 --- a/group_vars/rbdmirrors.yml.sample +++ b/group_vars/rbdmirrors.yml.sample @@ -49,7 +49,7 @@ dummy: # ceph_rbd_mirror_systemd_overrides will override the systemd settings # for the ceph-rbd-mirror services. # For example,to set "PrivateDevices=false" you can specify: -#ceph_rbd_mirror_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_rbd_mirror_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/group_vars/rgwloadbalancers.yml.sample b/group_vars/rgwloadbalancers.yml.sample index a026c939dd..b3132c5239 100644 --- a/group_vars/rgwloadbalancers.yml.sample +++ b/group_vars/rgwloadbalancers.yml.sample @@ -26,10 +26,10 @@ dummy: # - no-tlsv11 # - no-tls-tickets # -#virtual_ips: -# - 192.168.238.250 -# - 192.168.238.251 +# virtual_ips: +# - 192.168.238.250 +# - 192.168.238.251 # -#virtual_ip_netmask: 24 -#virtual_ip_interface: ens33 +# virtual_ip_netmask: 24 +# virtual_ip_interface: ens33 diff --git a/group_vars/rgws.yml.sample b/group_vars/rgws.yml.sample index 15c3bf8a58..885f40c87d 100644 --- a/group_vars/rgws.yml.sample +++ b/group_vars/rgws.yml.sample @@ -45,30 +45,30 @@ dummy: # If the key doesn't exist it falls back to the default replicated_rule. # This only works for replicated pool type not erasure. -#rgw_create_pools: -# "{{ rgw_zone }}.rgw.buckets.data": -# pg_num: 64 -# type: ec -# ec_profile: myecprofile -# ec_k: 5 -# ec_m: 3 -# "{{ rgw_zone }}.rgw.buckets.index": -# pg_num: 16 -# size: 3 -# type: replicated -# "{{ rgw_zone }}.rgw.meta": -# pg_num: 8 -# size: 3 -# type: replicated -# "{{ rgw_zone }}.rgw.log": -# pg_num: 8 -# size: 3 -# type: replicated -# "{{ rgw_zone }}.rgw.control": -# pg_num: 8 -# size: 3 -# type: replicated -# rule_name: foo +# rgw_create_pools: +# "{{ rgw_zone }}.rgw.buckets.data": +# pg_num: 64 +# type: ec +# ec_profile: myecprofile +# ec_k: 5 +# ec_m: 3 +# "{{ rgw_zone }}.rgw.buckets.index": +# pg_num: 16 +# size: 3 +# type: replicated +# "{{ rgw_zone }}.rgw.meta": +# pg_num: 8 +# size: 3 +# type: replicated +# "{{ rgw_zone }}.rgw.log": +# pg_num: 8 +# size: 3 +# type: replicated +# "{{ rgw_zone }}.rgw.control": +# pg_num: 8 +# size: 3 +# type: replicated +# rule_name: foo ########## @@ -81,8 +81,8 @@ dummy: # These options can be passed using the 'ceph_rgw_docker_extra_env' variable. #ceph_rgw_docker_memory_limit: "4096m" #ceph_rgw_docker_cpu_limit: 8 -#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" -#ceph_rgw_docker_cpuset_mems: "0" +# ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" +# ceph_rgw_docker_cpuset_mems: "0" #ceph_rgw_docker_extra_env: #ceph_config_keys: [] # DON'T TOUCH ME @@ -94,7 +94,7 @@ dummy: # ceph_rgw_systemd_overrides will override the systemd settings # for the ceph-rgw services. 
# For example,to set "PrivateDevices=false" you can specify: -#ceph_rgw_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_rgw_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 98c0f0ce5d..8d18aec6a2 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -74,7 +74,7 @@ dummy: # If configure_firewall is true, then ansible will try to configure the # appropriate firewalling rules so that Ceph daemons can communicate # with each others. -#configure_firewall: True +#configure_firewall: true # Open ports on corresponding nodes if firewall is installed on it #ceph_mon_firewall_zone: public @@ -120,7 +120,7 @@ dummy: # This variable determines if ceph packages can be updated. If False, the # package resources will use "state=present". If True, they will use # "state=latest". -#upgrade_ceph_packages: False +#upgrade_ceph_packages: false #ceph_use_distro_backports: false # DEBIAN ONLY #ceph_directories_mode: "0755" @@ -171,7 +171,7 @@ ceph_repository: rhcs # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ # for more info read: https://github.com/ceph/ceph-ansible/issues/305 -#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}" +# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}" # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0) @@ -229,7 +229,7 @@ ceph_iscsi_config_dev: false # a URL to the .repo file to be installed on the targets. For deb, # ceph_custom_repo should be the URL to the repo base. # -#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc +# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc #ceph_custom_repo: https://server.domain.com/ceph-custom-repo @@ -238,14 +238,14 @@ ceph_iscsi_config_dev: false # Enabled when ceph_repository == 'local' # # Path to DESTDIR of the ceph install -#ceph_installation_dir: "/path/to/ceph_installation/" +# ceph_installation_dir: "/path/to/ceph_installation/" # Whether or not to use installer script rundep_installer.sh # This script takes in rundep and installs the packages line by line onto the machine # If this is set to false then it is assumed that the machine ceph is being copied onto will already have # all runtime dependencies installed -#use_installer: false +# use_installer: false # Root directory for ceph-ansible -#ansible_dir: "/path/to/ceph-ansible" +# ansible_dir: "/path/to/ceph-ansible" ###################### @@ -328,12 +328,12 @@ ceph_iscsi_config_dev: false #ip_version: ipv4 #mon_host_v1: -# enabled: True +# enabled: true # suffix: ':6789' #mon_host_v2: # suffix: ':3300' -#enable_ceph_volume_debug: False +#enable_ceph_volume_debug: false ########## # CEPHFS # @@ -405,7 +405,7 @@ ceph_iscsi_config_dev: false ## Testing mode # enable this mode _only_ when you have a single node # if you don't want it keep the option commented -#common_single_host_mode: true +# common_single_host_mode: true ## Handlers - restarting daemons after a config change # if for whatever reasons the content of your ceph configuration changes @@ -527,10 +527,10 @@ ceph_docker_image: "rhceph/rhceph-5-rhel8" ceph_docker_image_tag: "latest" ceph_docker_registry: "registry.redhat.io" ceph_docker_registry_auth: true -#ceph_docker_registry_username: -#ceph_docker_registry_password: -#ceph_docker_http_proxy: 
-#ceph_docker_https_proxy: +# ceph_docker_registry_username: +# ceph_docker_registry_password: +# ceph_docker_http_proxy: +# ceph_docker_https_proxy: #ceph_docker_no_proxy: "localhost,127.0.0.1" ## Client only docker image - defaults to {{ ceph_docker_image }} #ceph_client_docker_image: "{{ ceph_docker_image }}" @@ -563,7 +563,7 @@ containerized_deployment: true # name: "images" # rule_name: "my_replicated_rule" # application: "rbd" -# pg_autoscale_mode: False +# pg_autoscale_mode: false # pg_num: 16 # pgp_num: 16 # target_size_ratio: 0.2 @@ -613,7 +613,7 @@ containerized_deployment: true ############# # DASHBOARD # ############# -#dashboard_enabled: True +#dashboard_enabled: true # Choose http or https # For https, you should set dashboard.crt/key and grafana.crt/key # If you define the dashboard_crt and dashboard_key variables, but leave them as '', @@ -625,7 +625,7 @@ containerized_deployment: true #dashboard_admin_user: admin #dashboard_admin_user_ro: false # This variable must be set with a strong custom password when dashboard_enabled is True -#dashboard_admin_password: p@ssw0rd +# dashboard_admin_password: p@ssw0rd # We only need this for SSL (https) connections #dashboard_crt: '' #dashboard_key: '' @@ -634,7 +634,7 @@ containerized_deployment: true #dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}" #dashboard_rgw_api_user_id: ceph-dashboard #dashboard_rgw_api_admin_resource: '' -#dashboard_rgw_api_no_ssl_verify: False +#dashboard_rgw_api_no_ssl_verify: false #dashboard_frontend_vip: '' #dashboard_disabled_features: [] #prometheus_frontend_vip: '' @@ -643,7 +643,7 @@ node_exporter_container_image: registry.redhat.io/openshift4/ose-prometheus-node #node_exporter_port: 9100 #grafana_admin_user: admin # This variable must be set with a strong custom password when dashboard_enabled is True -#grafana_admin_password: admin +# grafana_admin_password: admin # We only need this for SSL (https) connections #grafana_crt: '' #grafana_key: '' @@ -675,7 +675,7 @@ grafana_container_image: registry.redhat.io/rhceph/rhceph-5-dashboard-rhel8:5 #grafana_plugins: # - vonage-status-panel # - grafana-piechart-panel -#grafana_allow_embedding: True +#grafana_allow_embedding: true #grafana_port: 3000 #grafana_network: "{{ public_network }}" #grafana_conf_overrides: {} @@ -691,7 +691,7 @@ prometheus_container_image: registry.redhat.io/openshift4/ose-prometheus:v4.6 #prometheus_conf_overrides: {} # Uncomment out this variable if you need to customize the retention period for prometheus storage. # set it to '30d' if you want to retain 30 days of data. 
-#prometheus_storage_tsdb_retention_time: 15d +# prometheus_storage_tsdb_retention_time: 15d alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alertmanager:v4.6 #alertmanager_container_cpu_period: 100000 #alertmanager_container_cpu_cores: 2 @@ -749,11 +749,11 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert # # Example: # -#rbd_devices: -# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' } -# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' } -# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' } -# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' } +# rbd_devices: +# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' } +# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' } +# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' } +# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' } #rbd_devices: {} # client_connections defines the client ACL's to restrict client access to specific LUNs @@ -767,20 +767,19 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert # # Example: # -#client_connections: -# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' } -# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' } +# client_connections: +# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' } +# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' } #client_connections: {} -#no_log_on_ceph_key_tasks: True +#no_log_on_ceph_key_tasks: true ############### # DEPRECATION # ############### - ###################################################### # VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER # # *DO NOT* MODIFY THEM # @@ -788,5 +787,5 @@ alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alert #container_exec_cmd: #docker: false -#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}" +#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}" diff --git a/infrastructure-playbooks/add-mon.yml b/infrastructure-playbooks/add-mon.yml index 0d1859545f..166d9ceb60 100644 --- a/infrastructure-playbooks/add-mon.yml +++ b/infrastructure-playbooks/add-mon.yml @@ -6,26 +6,30 @@ # Ensure that all monitors are present in the mons # group in your inventory so that the ceph configuration file # is created correctly for the new OSD(s). 
-- hosts: mons +- name: Pre-requisites operations for adding new monitor(s) + hosts: mons gather_facts: false vars: delegate_facts_host: true become: true pre_tasks: - - import_tasks: "{{ playbook_dir }}/../raw_install_python.yml" + - name: Import raw_install_python tasks + ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml" - - name: gather facts - setup: + - name: Gather facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) - - import_role: + + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: gather and delegate facts - setup: + - name: Gather and delegate facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' @@ -36,52 +40,84 @@ run_once: true when: delegate_facts_host | bool tasks: - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-validate role + ansible.builtin.import_role: name: ceph-validate - - import_role: + + - name: Import ceph-infra role + ansible.builtin.import_role: name: ceph-infra - - import_role: + + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine when: containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool -- hosts: mons +- name: Deploy Ceph monitors + hosts: mons gather_facts: false become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-mon role + ansible.builtin.import_role: name: ceph-mon - - import_role: + + - name: Import ceph-crash role + ansible.builtin.import_role: name: ceph-crash when: containerized_deployment | bool -# update config files on OSD nodes -- hosts: osds +- name: Update config file on OSD nodes + hosts: osds gather_facts: true become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config diff --git a/infrastructure-playbooks/backup-and-restore-ceph-files.yml b/infrastructure-playbooks/backup-and-restore-ceph-files.yml index aec8f47f8c..f17d9b3d92 100644 --- a/infrastructure-playbooks/backup-and-restore-ceph-files.yml +++ b/infrastructure-playbooks/backup-and-restore-ceph-files.yml @@ -19,12 +19,13 @@ # ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=backup -e target_node=mon01 # 
ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=restore -e target_node=mon01 -- hosts: localhost +- name: Backup and restore Ceph files + hosts: localhost become: true gather_facts: true tasks: - - name: exit playbook, if user did not set the source node - fail: + - name: Exit playbook, if user did not set the source node + ansible.builtin.fail: msg: > "You must pass the node name: -e target_node=. The name must match what is set in your inventory." @@ -32,71 +33,73 @@ - target_node is not defined or target_node not in groups.get('all', []) - - name: exit playbook, if user did not set the backup directory - fail: + - name: Exit playbook, if user did not set the backup directory + ansible.builtin.fail: msg: > "you must pass the backup directory path: -e backup_dir=" when: backup_dir is not defined - - name: exit playbook, if user did not set the playbook mode (backup|restore) - fail: + - name: Exit playbook, if user did not set the playbook mode (backup|restore) + ansible.builtin.fail: msg: > "you must pass the mode: -e mode=" when: - mode is not defined or mode not in ['backup', 'restore'] - - name: gather facts on source node - setup: + - name: Gather facts on source node + ansible.builtin.setup: delegate_to: "{{ target_node }}" delegate_facts: true - - name: backup mode + - name: Backup mode when: mode == 'backup' block: - - name: create a temp directory + - name: Create a temp directory ansible.builtin.tempfile: state: directory suffix: ansible-archive-ceph register: tmp_dir delegate_to: "{{ target_node }}" - - name: archive files - archive: + - name: Archive files + community.general.archive: path: "{{ item }}" dest: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar" format: tar + mode: "0644" delegate_to: "{{ target_node }}" loop: - /etc/ceph - /var/lib/ceph - - name: create backup directory + - name: Create backup directory become: false - file: + ansible.builtin.file: path: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}" state: directory + mode: "0755" - - name: backup files - fetch: + - name: Backup files + ansible.builtin.fetch: src: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar" dest: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar" - flat: yes + flat: true loop: - /etc/ceph - /var/lib/ceph delegate_to: "{{ target_node }}" - - name: remove temp directory - file: + - name: Remove temp directory + ansible.builtin.file: path: "{{ tmp_dir.path }}" state: absent delegate_to: "{{ target_node }}" - - name: restore mode + - name: Restore mode when: mode == 'restore' block: - - name: unarchive files + - name: Unarchive files ansible.builtin.unarchive: src: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar" dest: "{{ item | dirname }}" diff --git a/infrastructure-playbooks/ceph-keys.yml b/infrastructure-playbooks/ceph-keys.yml index d03c37d592..db078e3280 100644 --- a/infrastructure-playbooks/ceph-keys.yml +++ b/infrastructure-playbooks/ceph-keys.yml @@ -4,7 +4,8 @@ # # It currently runs on localhost -- hosts: localhost +- name: CephX key management examples + hosts: localhost gather_facts: false vars: cluster: ceph @@ -17,12 +18,12 @@ - client.leseb1 - client.pythonnnn keys_to_create: - - { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" } - - { name: client.existpassss, caps: { mon: "allow r", 
osd: "allow *" } , mode: "0600" } - - { name: client.path, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" } + - { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" }, mode: "0600" } + - { name: client.existpassss, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" } + - { name: client.path, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" } tasks: - - name: create ceph key(s) module + - name: Create ceph key(s) module ceph_key: name: "{{ item.name }}" caps: "{{ item.caps }}" @@ -31,7 +32,7 @@ containerized: "{{ container_exec_cmd | default(False) }}" with_items: "{{ keys_to_create }}" - - name: update ceph key(s) + - name: Update ceph key(s) ceph_key: name: "{{ item.name }}" state: update @@ -40,7 +41,7 @@ containerized: "{{ container_exec_cmd | default(False) }}" with_items: "{{ keys_to_create }}" - - name: delete ceph key(s) + - name: Delete ceph key(s) ceph_key: name: "{{ item }}" state: absent @@ -48,7 +49,7 @@ containerized: "{{ container_exec_cmd | default(False) }}" with_items: "{{ keys_to_delete }}" - - name: info ceph key(s) + - name: Info ceph key(s) ceph_key: name: "{{ item }}" state: info @@ -58,7 +59,7 @@ ignore_errors: true with_items: "{{ keys_to_info }}" - - name: list ceph key(s) + - name: List ceph key(s) ceph_key: state: list cluster: "{{ cluster }}" @@ -66,7 +67,7 @@ register: list_keys ignore_errors: true - - name: fetch_initial_keys + - name: Fetch_initial_keys # noqa: ignore-errors ceph_key: state: fetch_initial_keys cluster: "{{ cluster }}" diff --git a/infrastructure-playbooks/cephadm-adopt.yml b/infrastructure-playbooks/cephadm-adopt.yml index dcd8a4899a..8b7c6e22e3 100644 --- a/infrastructure-playbooks/cephadm-adopt.yml +++ b/infrastructure-playbooks/cephadm-adopt.yml @@ -3,19 +3,19 @@ # This playbook does a cephadm adopt for all the Ceph services # -- name: confirm whether user really meant to adopt the cluster by cephadm +- name: Confirm whether user really meant to adopt the cluster by cephadm hosts: localhost connection: local become: false gather_facts: false vars_prompt: - - name: ireallymeanit + - name: Ireallymeanit prompt: Are you sure you want to adopt the cluster by cephadm ? default: 'no' - private: no + private: false tasks: - - name: exit playbook, if user did not mean to adopt the cluster by cephadm - fail: + - name: Exit playbook, if user did not mean to adopt the cluster by cephadm + ansible.builtin.fail: msg: > Exiting cephadm-adopt playbook, cluster was NOT adopted. 
To adopt the cluster, either say 'yes' on the prompt or @@ -23,17 +23,17 @@ invoking the playbook when: ireallymeanit != 'yes' - - name: import_role ceph-defaults - import_role: + - name: Import_role ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - name: check if a legacy grafana-server group exists - import_role: + - name: Check if a legacy grafana-server group exists + ansible.builtin.import_role: name: ceph-facts tasks_from: convert_grafana_server_group_name.yml when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0 -- name: gather facts and prepare system for cephadm +- name: Gather facts and prepare system for cephadm hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" @@ -45,24 +45,25 @@ - "{{ iscsi_gw_group_name|default('iscsigws') }}" - "{{ monitoring_group_name|default('monitoring') }}" become: true - any_errors_fatal: True + any_errors_fatal: true gather_facts: false vars: delegate_facts_host: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: gather facts - setup: + - name: Gather facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) - - name: gather and delegate facts - setup: + - name: Gather and delegate facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' @@ -73,23 +74,24 @@ run_once: true when: delegate_facts_host | bool - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - name: set_fact ceph_cmd - set_fact: + - name: Set_fact ceph_cmd + ansible.builtin.set_fact: ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:ro -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }}" - - name: check pools have an application enabled - command: "{{ ceph_cmd }} health detail --format json" + - name: Check pools have an application enabled + ansible.builtin.command: "{{ ceph_cmd }} health detail --format json" register: health_detail run_once: true changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" - - name: check for POOL_APP_NOT_ENABLED warning - fail: + - name: Check for POOL_APP_NOT_ENABLED warning + ansible.builtin.fail: msg: "Make sure all your pool have an application enabled." 
run_once: true delegate_to: localhost @@ -97,49 +99,52 @@ - (health_detail.stdout | default('{}', True) | from_json)['status'] == "HEALTH_WARN" - "'POOL_APP_NOT_ENABLED' in (health_detail.stdout | default('{}', True) | from_json)['checks']" - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: convert_grafana_server_group_name.yml - when: groups.get((grafana_server_group_name|default('grafana-server')), []) | length > 0 + when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0 - - name: get the ceph version - command: "{{ container_binary + ' run --rm --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --version" + - name: Get the ceph version + ansible.builtin.command: "{{ container_binary + ' run --rm --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --version" changed_when: false register: ceph_version_out - - name: set_fact ceph_version - set_fact: + - name: Set_fact ceph_version + ansible.builtin.set_fact: ceph_version: "{{ ceph_version_out.stdout.split(' ')[2] }}" - - name: fail on pre octopus ceph releases - fail: + - name: Fail on pre octopus ceph releases + ansible.builtin.fail: msg: > Your Ceph version {{ ceph_version }} is not supported for this operation. Please upgrade your cluster with the rolling_update.yml playbook first. when: ceph_version is version('15.2', '<') - - name: check if it is atomic host - stat: + - name: Check if it is atomic host + ansible.builtin.stat: path: /run/ostree-booted register: stat_ostree - - name: set_fact is_atomic - set_fact: + - name: Set_fact is_atomic + ansible.builtin.set_fact: is_atomic: "{{ stat_ostree.stat.exists }}" - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine when: not containerized_deployment | bool - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common tasks_from: registry.yml when: - not containerized_deployment | bool - ceph_docker_registry_auth | bool - - name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image" - command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + - name: Pulling Ceph container image + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" changed_when: false register: docker_image until: docker_image.rc == 0 @@ -156,47 +161,47 @@ inventory_hostname in groups.get(iscsi_gw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, []) - - name: configure repository for installing cephadm + - name: Configure repository for installing cephadm when: containerized_deployment | bool tags: with_pkg block: - - name: set_fact ceph_origin - set_fact: + - name: Set_fact ceph_origin + ansible.builtin.set_fact: ceph_origin: repository when: ceph_origin == 'dummy' - - name: set_fact ceph_repository - set_fact: + - name: Set_fact ceph_repository + ansible.builtin.set_fact: ceph_repository: community when: ceph_repository == 'dummy' - - name: validate repository variables - import_role: + - name: Validate repository variables + ansible.builtin.import_role: name: 
ceph-validate tasks_from: check_repository.yml - - name: configure repository - import_role: + - name: Configure repository + ansible.builtin.import_role: name: ceph-common tasks_from: "configure_repository.yml" - - name: install cephadm requirements + - name: Install cephadm requirements tags: with_pkg - package: + ansible.builtin.package: name: ['python3', 'lvm2'] register: result until: result is succeeded - - name: install cephadm + - name: Install cephadm tags: with_pkg - package: + ansible.builtin.package: name: cephadm register: result until: result is succeeded - - name: install cephadm mgr module + - name: Install cephadm mgr module tags: with_pkg - package: + ansible.builtin.package: name: ceph-mgr-cephadm register: result until: result is succeeded @@ -204,26 +209,26 @@ - not containerized_deployment | bool - mgr_group_name in group_names - - name: get current fsid - command: "{{ ceph_cmd }} fsid" + - name: Get current fsid + ansible.builtin.command: "{{ ceph_cmd }} fsid" register: current_fsid run_once: true changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" - - name: get a minimal ceph configuration - command: "{{ ceph_cmd }} config generate-minimal-conf" + - name: Get a minimal ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config generate-minimal-conf" register: minimal_config run_once: true changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" - - name: set_fact fsid - set_fact: + - name: Set_fact fsid + ansible.builtin.set_fact: fsid: "{{ current_fsid.stdout }}" run_once: true - - name: enable cephadm mgr module + - name: Enable cephadm mgr module ceph_mgr_module: name: cephadm cluster: "{{ cluster }}" @@ -234,14 +239,14 @@ run_once: true delegate_to: '{{ groups[mon_group_name][0] }}' - - name: set cephadm as orchestrator backend - command: "{{ ceph_cmd }} orch set backend cephadm" + - name: Set cephadm as orchestrator backend + ansible.builtin.command: "{{ ceph_cmd }} orch set backend cephadm" changed_when: false run_once: true delegate_to: '{{ groups[mon_group_name][0] }}' - - name: check if there is an existing ssh keypair - stat: + - name: Check if there is an existing ssh keypair + ansible.builtin.stat: path: "{{ item }}" loop: - "{{ cephadm_ssh_priv_key_path }}" @@ -251,128 +256,129 @@ run_once: true delegate_to: '{{ groups[mon_group_name][0] }}' - - name: set fact - set_fact: + - name: Set fact + ansible.builtin.set_fact: stat_ssh_key_pair: "{{ ssh_keys.results | map(attribute='stat.exists') | list }}" - - name: fail if either ssh public or private key is missing - fail: + - name: Fail if either ssh public or private key is missing + ansible.builtin.fail: msg: "One part of the ssh keypair of user {{ cephadm_ssh_user }} is missing" when: - false in stat_ssh_key_pair - true in stat_ssh_key_pair - - name: generate cephadm ssh key if there is none - command: "{{ ceph_cmd }} cephadm generate-key" + - name: Generate cephadm ssh key if there is none + ansible.builtin.command: "{{ ceph_cmd }} cephadm generate-key" when: not true in stat_ssh_key_pair changed_when: false run_once: true delegate_to: '{{ groups[mon_group_name][0] }}' - - name: use existing user keypair for remote connections + - name: Use existing user keypair for remote connections when: not false in stat_ssh_key_pair delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true - command: > + ansible.builtin.command: > {{ container_binary + ' run --rm --net=host --security-opt label=disable -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:ro -v 
/var/run/ceph:/var/run/ceph:z -v ' + item.1 + ':/etc/ceph/cephadm.' + item.0 + ':ro --entrypoint=ceph '+ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }} cephadm set-{{ item.0 }}-key -i /etc/ceph/cephadm.{{ item.0 }} + changed_when: false with_together: - - [ 'pub', 'priv' ] - - [ '{{ cephadm_ssh_pub_key_path }}', '{{ cephadm_ssh_priv_key_path }}' ] + - ['pub', 'priv'] + - ['{{ cephadm_ssh_pub_key_path }}', '{{ cephadm_ssh_priv_key_path }}'] - - name: get the cephadm ssh pub key - command: "{{ ceph_cmd }} cephadm get-pub-key" + - name: Get the cephadm ssh pub key + ansible.builtin.command: "{{ ceph_cmd }} cephadm get-pub-key" changed_when: false run_once: true register: cephadm_pubpkey delegate_to: '{{ groups[mon_group_name][0] }}' - - name: allow cephadm key for {{ cephadm_ssh_user }} account - authorized_key: + - name: Allow cephadm key + ansible.posix.authorized_key: user: "{{ cephadm_ssh_user }}" key: '{{ cephadm_pubpkey.stdout }}' - - name: set cephadm ssh user to {{ cephadm_ssh_user }} - command: "{{ ceph_cmd }} cephadm set-user {{ cephadm_ssh_user }}" + - name: Set cephadm ssh user to {{ cephadm_ssh_user }} + ansible.builtin.command: "{{ ceph_cmd }} cephadm set-user {{ cephadm_ssh_user }}" changed_when: false run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" - - name: run cephadm prepare-host - command: cephadm prepare-host + - name: Run cephadm prepare-host + ansible.builtin.command: cephadm prepare-host changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: set default container image in ceph configuration - command: "{{ ceph_cmd }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + - name: Set default container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" changed_when: false run_once: true delegate_to: '{{ groups[mon_group_name][0] }}' - - name: set container image base in ceph configuration - command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}" + - name: Set container image base in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}" changed_when: false run_once: true delegate_to: '{{ groups[mon_group_name][0] }}' - - name: set dashboard container image in ceph mgr configuration + - name: Set dashboard container image in ceph mgr configuration when: dashboard_enabled | bool run_once: true block: - - name: set alertmanager container image in ceph configuration - command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}" + - name: Set alertmanager container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' - - name: set grafana container image in ceph configuration - command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}" + - name: Set grafana container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} 
config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' - - name: set node-exporter container image in ceph configuration - command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}" + - name: Set node-exporter container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' - - name: set prometheus container image in ceph configuration - command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}" + - name: Set prometheus container image in ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' - - name: enable the osd memory autotune for hci environment - command: "{{ ceph_cmd }} config set osd osd_memory_target_autotune true" + - name: Enable the osd memory autotune for hci environment + ansible.builtin.command: "{{ ceph_cmd }} config set osd osd_memory_target_autotune true" changed_when: false run_once: true delegate_to: '{{ groups[mon_group_name][0] }}' when: is_hci | bool - - name: set autotune_memory_target_ratio - command: "{{ ceph_cmd }} config set mgr mgr/cephadm/autotune_memory_target_ratio {{ '0.2' if is_hci | bool else '0.7' }}" + - name: Set autotune_memory_target_ratio + ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/autotune_memory_target_ratio {{ '0.2' if is_hci | bool else '0.7' }}" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: manage nodes with cephadm - ipv4 - command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | first }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}" + - name: Manage nodes with cephadm - ipv4 + ansible.builtin.command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | first }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' when: cephadm_mgmt_network.split(',')[0] is ansible.utils.ipv4 - - name: manage nodes with cephadm - ipv6 - command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}" + - name: Manage nodes with cephadm - ipv6 + ansible.builtin.command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' when: cephadm_mgmt_network.split(',')[0] is ansible.utils.ipv6 - - name: add ceph label for core component - command: "{{ ceph_cmd }} orch host label add {{ 
ansible_facts['nodename'] }} ceph" + - name: Add ceph label for core component + ansible.builtin.command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['nodename'] }} ceph" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' when: inventory_hostname in groups.get(mon_group_name, []) or @@ -382,7 +388,7 @@ inventory_hostname in groups.get(mgr_group_name, []) or inventory_hostname in groups.get(rbdmirror_group_name, []) - - name: get the client.admin keyring + - name: Get the client.admin keyring ceph_key: name: client.admin cluster: "{{ cluster }}" @@ -395,8 +401,8 @@ delegate_to: '{{ groups[mon_group_name][0] }}' register: client_admin_keyring - - name: copy the client.admin keyring - copy: + - name: Copy the client.admin keyring + ansible.builtin.copy: dest: "/etc/ceph/{{ cluster }}.client.admin.keyring" content: "{{ client_admin_keyring.stdout + '\n' }}" owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" @@ -411,8 +417,8 @@ - "{{ groups.get(mgr_group_name, []) }}" - "{{ groups.get(rbdmirror_group_name, []) }}" - - name: assimilate ceph configuration - command: "{{ ceph_cmd }} config assimilate-conf -i /etc/ceph/{{ cluster }}.conf" + - name: Assimilate ceph configuration + ansible.builtin.command: "{{ ceph_cmd }} config assimilate-conf -i /etc/ceph/{{ cluster }}.conf" changed_when: false when: inventory_hostname in groups.get(mon_group_name, []) or inventory_hostname in groups.get(osd_group_name, []) or @@ -421,20 +427,20 @@ inventory_hostname in groups.get(mgr_group_name, []) or inventory_hostname in groups.get(rbdmirror_group_name, []) - - name: set_fact cephadm_cmd - set_fact: + - name: Set_fact cephadm_cmd + ansible.builtin.set_fact: cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}" - - name: set container registry info - command: "{{ ceph_cmd }} cephadm registry-login {{ ceph_docker_registry }} {{ ceph_docker_registry_username }} {{ ceph_docker_registry_password }}" + - name: Set container registry info + ansible.builtin.command: "{{ ceph_cmd }} cephadm registry-login {{ ceph_docker_registry }} {{ ceph_docker_registry_username }} {{ ceph_docker_registry_password }}" changed_when: false no_log: true run_once: true delegate_to: '{{ groups[mon_group_name][0] }}' when: ceph_docker_registry_auth | bool - - name: remove logrotate configuration - file: + - name: Remove logrotate configuration + ansible.builtin.file: path: /etc/logrotate.d/ceph state: absent when: inventory_hostname in groups.get(mon_group_name, []) or @@ -446,58 +452,58 @@ inventory_hostname in groups.get(iscsi_gw_group_name, []) -- name: store existing rbd mirror peers in monitor config store +- name: Store existing rbd mirror peers in monitor config store hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" become: true any_errors_fatal: true gather_facts: true tasks: - - name: store existing rbd mirror peers in monitor config store + - name: Store existing rbd mirror peers in monitor config store when: - ceph_rbd_mirror_configure | default(True) | bool - ceph_rbd_mirror_remote_user is defined - ceph_rbd_mirror_remote_cluster is defined block: - - name: import ceph-defaults - import_role: + - name: Import ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - name: import ceph-validate - import_role: + - name: Import ceph-validate + ansible.builtin.import_role: name: ceph-validate tasks_from: check_rbdmirror.yml - - name: import container_binary - import_role: + - name: Import container_binary + ansible.builtin.import_role: 
name: ceph-facts tasks_from: container_binary.yml - - name: set_fact rbd_cmd - set_fact: + - name: Set_fact rbd_cmd + ansible.builtin.set_fact: rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }} -n client.rbd-mirror.{{ ansible_facts['hostname'] }} -k /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring" - - name: set_fact admin_rbd_cmd - set_fact: + - name: Set_fact admin_rbd_cmd + ansible.builtin.set_fact: admin_rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }}" - - name: get mirror pool info - command: "{{ rbd_cmd }} mirror pool info {{ ceph_rbd_mirror_pool }} --format json" + - name: Get mirror pool info + ansible.builtin.command: "{{ rbd_cmd }} mirror pool info {{ ceph_rbd_mirror_pool }} --format json" register: mirror_pool_info changed_when: false - - name: set_fact mirror_peer_found - set_fact: - mirror_peer_uuid: "{{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^'+ceph_rbd_mirror_remote_cluster+'$') | map(attribute='uuid') | list) }}" + - name: Set_fact mirror_peer_found + ansible.builtin.set_fact: + mirror_peer_uuid: "{{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^' + ceph_rbd_mirror_remote_cluster + '$') | map(attribute='uuid') | list) }}" - - name: remove current rbd mirror peer, add new peer into mon config store + - name: Remove current rbd mirror peer, add new peer into mon config store when: mirror_peer_uuid | length > 0 block: - - name: get remote user keyring - slurp: + - name: Get remote user keyring + ansible.builtin.slurp: src: "/etc/ceph/{{ ceph_rbd_mirror_remote_cluster }}.{{ ceph_rbd_mirror_remote_user }}.keyring" register: remote_user_keyring - - name: get quorum_status - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json" + - name: Get quorum_status + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" register: quorum_status @@ -505,60 +511,62 @@ environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: set_fact mon_ip_list - set_fact: + - name: Set_fact mon_ip_list + ansible.builtin.set_fact: mon_ip_list: "{{ mon_ip_list | default([]) | union([item['addr'].split(':')[0]]) }}" loop: "{{ (quorum_status.stdout | default('{}') | from_json)['monmap']['mons'] }}" run_once: true - - name: remove current mirror peer - command: "{{ admin_rbd_cmd }} mirror pool peer remove {{ ceph_rbd_mirror_pool }} {{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^'+ceph_rbd_mirror_remote_cluster+'$') | map(attribute='uuid') | list)[0] }}" + - name: Remove current mirror peer + ansible.builtin.command: "{{ admin_rbd_cmd }} mirror pool peer remove {{ 
ceph_rbd_mirror_pool }} {{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^' + ceph_rbd_mirror_remote_cluster + '$') | map(attribute='uuid') | list)[0] }}" delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" changed_when: false - - name: get remote user keyring secret - set_fact: + - name: Get remote user keyring secret + ansible.builtin.set_fact: remote_user_keyring_secret: "{{ item.split('=', 1)[1] | trim }}" with_items: "{{ (remote_user_keyring.content | b64decode).split('\n') }}" when: "'key = ' in item" - - name: create a temporary file - tempfile: + - name: Create a temporary file + ansible.builtin.tempfile: path: /etc/ceph state: file suffix: _ceph-ansible register: tmp_file delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" - - name: write secret to temporary file - copy: + - name: Write secret to temporary file + ansible.builtin.copy: dest: "{{ tmp_file.path }}" content: "{{ remote_user_keyring_secret }}" + mode: preserve delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" - - name: re-add mirror peer - command: "{{ admin_rbd_cmd }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ','.join(mon_ip_list) }} --remote-key-file {{ tmp_file.path }}" + - name: Re-add mirror peer + ansible.builtin.command: "{{ admin_rbd_cmd }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ','.join(mon_ip_list) }} --remote-key-file {{ tmp_file.path }}" delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" changed_when: false - - name: rm temporary file - file: + - name: Rm temporary file + ansible.builtin.file: path: "{{ tmp_file.path }}" state: absent delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}" -- name: adopt ceph mon daemons +- name: Adopt ceph mon daemons hosts: "{{ mon_group_name|default('mons') }}" serial: 1 become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: adopt mon daemon + - name: Adopt mon daemon cephadm_adopt: name: "mon.{{ ansible_facts['hostname'] }}" cluster: "{{ cluster }}" @@ -567,14 +575,14 @@ pull: false firewalld: "{{ true if configure_firewall | bool else false }}" - - name: reset failed ceph-mon systemd unit - command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}" # noqa 303 + - name: Reset failed ceph-mon systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module changed_when: false failed_when: false when: containerized_deployment | bool - - name: remove ceph-mon systemd files - file: + - name: Remove ceph-mon systemd files + ansible.builtin.file: path: "{{ item }}" state: absent loop: @@ -582,8 +590,8 @@ - /etc/systemd/system/ceph-mon@.service.d - /etc/systemd/system/ceph-mon.target - - name: waiting for the monitor to join the quorum... - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json" + - name: Waiting for the monitor to join the quorum... 
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json" changed_when: false register: ceph_health_raw until: > @@ -593,17 +601,18 @@ environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: adopt ceph mgr daemons +- name: Adopt ceph mgr daemons hosts: "{{ groups['mgrs'] | default(groups['mons']) | default(omit) }}" serial: 1 become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: adopt mgr daemon + - name: Adopt mgr daemon cephadm_adopt: name: "mgr.{{ ansible_facts['hostname'] }}" cluster: "{{ cluster }}" @@ -612,14 +621,14 @@ pull: false firewalld: "{{ true if configure_firewall | bool else false }}" - - name: reset failed ceph-mgr systemd unit - command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}" # noqa 303 + - name: Reset failed ceph-mgr systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module changed_when: false failed_when: false when: containerized_deployment | bool - - name: remove ceph-mgr systemd files - file: + - name: Remove ceph-mgr systemd files + ansible.builtin.file: path: "{{ item }}" state: absent loop: @@ -628,18 +637,19 @@ - /etc/systemd/system/ceph-mgr.target -- name: stop and remove legacy iscsigw daemons +- name: Stop and remove legacy iscsigw daemons hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}" serial: 1 become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: stop and disable iscsigw systemd services - service: + - name: Stop and disable iscsigw systemd services + ansible.builtin.service: name: '{{ item }}' state: stopped enabled: false @@ -649,8 +659,8 @@ - rbd-target-gw - tcmu-runner - - name: reset failed iscsigw systemd units - command: 'systemctl reset-failed {{ item }}' # noqa 303 + - name: Reset failed iscsigw systemd units + ansible.builtin.command: 'systemctl reset-failed {{ item }}' # noqa command-instead-of-module changed_when: false failed_when: false with_items: @@ -659,8 +669,8 @@ - tcmu-runner when: containerized_deployment | bool - - name: remove iscsigw systemd unit files - file: + - name: Remove iscsigw systemd unit files + ansible.builtin.file: path: '/etc/systemd/system/{{ item }}.service' state: absent with_items: @@ -670,17 +680,18 @@ when: containerized_deployment | bool -- name: redeploy iscsigw daemons +- name: Redeploy iscsigw daemons hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: update the placement of iscsigw hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply iscsi {{ iscsi_pool_name | default('rbd') }} {{ api_user | default('admin') }} {{ api_password | default('admin') }} {{ trusted_ip_list | default('192.168.122.1') }} --placement='{{ groups.get(iscsi_gw_group_name, []) | length }} label:{{ iscsi_gw_group_name }}'" + - name: Update the placement of iscsigw hosts + ansible.builtin.command: "{{ 
cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply iscsi {{ iscsi_pool_name | default('rbd') }} {{ api_user | default('admin') }} {{ api_password | default('admin') }} {{ trusted_ip_list | default('192.168.122.1') }} --placement='{{ groups.get(iscsi_gw_group_name, []) | length }} label:{{ iscsi_gw_group_name }}'" run_once: true changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' @@ -688,45 +699,46 @@ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: set osd flags +- name: Set osd flags hosts: "{{ osd_group_name|default('osds') }}" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: get pool list - command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" + - name: Get pool list + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" register: pool_list run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false check_mode: false - - name: get balancer module status - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" + - name: Get balancer module status + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" register: balancer_status_adopt run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false check_mode: false - - name: set_fact pools_pgautoscaler_mode - set_fact: + - name: Set_fact pools_pgautoscaler_mode + ansible.builtin.set_fact: pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}" run_once: true with_items: "{{ pool_list.stdout | default('{}') | from_json }}" - - name: disable balancer - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" + - name: Disable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false when: (balancer_status_adopt.stdout | from_json)['active'] | bool - - name: disable pg autoscale on pools + - name: Disable pg autoscale on pools ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -741,7 +753,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: set osd flags + - name: Set osd flags ceph_osd_flag: cluster: "{{ cluster }}" name: "{{ item }}" @@ -755,22 +767,24 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" -- name: adopt ceph osd daemons +- name: Adopt ceph osd daemons hosts: "{{ osd_group_name|default('osd') }}" serial: 1 become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml when: containerized_deployment | bool - - name: get osd list + - name: Get osd list ceph_volume: cluster: "{{ cluster }}" action: list @@ -779,27 +793,29 @@ 
CEPH_CONTAINER_BINARY: "{{ container_binary }}" register: osd_list - - name: set osd fsid for containerized deployment - lineinfile: + - name: Set osd fsid for containerized deployment + ansible.builtin.lineinfile: path: '/var/lib/ceph/osd/{{ cluster }}-{{ item.key }}/fsid' line: "{{ (item.value | selectattr('type', 'equalto', 'block') | map(attribute='tags') | first)['ceph.osd_fsid'] }}" owner: '{{ ceph_uid }}' group: '{{ ceph_uid }}' create: true + mode: "0644" with_dict: '{{ osd_list.stdout | from_json }}' when: containerized_deployment | bool - - name: set osd type for containerized deployment - lineinfile: + - name: Set osd type for containerized deployment + ansible.builtin.lineinfile: path: '/var/lib/ceph/osd/{{ cluster }}-{{ item }}/type' line: 'bluestore' owner: '{{ ceph_uid }}' group: '{{ ceph_uid }}' create: true + mode: "0644" loop: '{{ (osd_list.stdout | from_json).keys() | list }}' when: containerized_deployment | bool - - name: adopt osd daemon + - name: Adopt osd daemon cephadm_adopt: name: "osd.{{ item }}" cluster: "{{ cluster }}" @@ -809,8 +825,8 @@ firewalld: "{{ true if configure_firewall | bool else false }}" loop: '{{ (osd_list.stdout | from_json).keys() | list }}' - - name: remove ceph-osd systemd and ceph-osd-run.sh files - file: + - name: Remove ceph-osd systemd and ceph-osd-run.sh files + ansible.builtin.file: path: "{{ item }}" state: absent loop: @@ -819,19 +835,19 @@ - /etc/systemd/system/ceph-osd.target - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh" - - name: remove osd directory - file: + - name: Remove osd directory + ansible.builtin.file: path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" state: absent loop: '{{ (osd_list.stdout | from_json).keys() | list }}' - - name: remove any legacy directories in /var/lib/ceph/mon (workaround) - file: + - name: Remove any legacy directories in /var/lib/ceph/mon (workaround) + ansible.builtin.file: path: "/var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}" state: absent - - name: waiting for clean pgs... - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph pg stat --format json" + - name: Waiting for clean pgs... 
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph pg stat --format json" changed_when: false register: ceph_health_post until: > @@ -844,16 +860,17 @@ environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: unset osd flags +- name: Unset osd flags hosts: "{{ osd_group_name|default('osds') }}" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: re-enable pg autoscale on pools + - name: Re-enable pg autoscale on pools ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -868,7 +885,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: unset osd flags + - name: Unset osd flags ceph_osd_flag: cluster: "{{ cluster }}" name: "{{ item }}" @@ -882,62 +899,64 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: re-enable balancer - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" + - name: Re-enable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false when: (balancer_status_adopt.stdout | from_json)['active'] | bool -- name: redeploy mds daemons +- name: Redeploy mds daemons hosts: "{{ mds_group_name|default('mdss') }}" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: update the placement of metadata hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mds {{ cephfs }} --placement='{{ groups.get(mds_group_name, []) | length }} label:{{ mds_group_name }}'" + - name: Update the placement of metadata hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mds {{ cephfs }} --placement='{{ groups.get(mds_group_name, []) | length }} label:{{ mds_group_name }}'" run_once: true changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: stop and remove legacy ceph mds daemons +- name: Stop and remove legacy ceph mds daemons hosts: "{{ mds_group_name|default('mdss') }}" serial: 1 become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: stop and disable ceph-mds systemd service - service: + - name: Stop and disable ceph-mds systemd service + ansible.builtin.service: name: "ceph-mds@{{ ansible_facts['hostname'] }}" state: stopped enabled: false failed_when: false - - name: stop and disable ceph-mds systemd target - service: + - name: Stop and disable ceph-mds systemd target + ansible.builtin.service: name: ceph-mds.target state: stopped enabled: false failed_when: false - - name: reset failed 
ceph-mds systemd unit - command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}" # noqa 303 + - name: Reset failed ceph-mds systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module changed_when: false failed_when: false when: containerized_deployment | bool - - name: remove ceph-mds systemd files - file: + - name: Remove ceph-mds systemd files + ansible.builtin.file: path: "{{ item }}" state: absent loop: @@ -945,53 +964,55 @@ - /etc/systemd/system/ceph-mds@.service.d - /etc/systemd/system/ceph-mds.target - - name: remove legacy ceph mds data - file: + - name: Remove legacy ceph mds data + ansible.builtin.file: path: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}" state: absent -- name: redeploy rgw daemons +- name: Redeploy rgw daemons hosts: "{{ rgw_group_name | default('rgws') }}" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: set_radosgw_address.yml - - name: import rgw ssl certificate into kv store + - name: Import rgw ssl certificate into kv store when: radosgw_frontend_ssl_certificate | length > 0 block: - - name: slurp rgw ssl certificate - slurp: + - name: Slurp rgw ssl certificate + ansible.builtin.slurp: src: "{{ radosgw_frontend_ssl_certificate }}" register: rgw_ssl_cert - - name: store ssl certificate in kv store - command: > + - name: Store ssl certificate in kv store + ansible.builtin.command: > {{ container_binary }} run --rm -i -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} config-key set rgw/cert/rgw.{{ ansible_facts['hostname'] }} -i - args: stdin: "{{ rgw_ssl_cert.content | b64decode }}" - stdin_add_newline: no + stdin_add_newline: false changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: set_fact rgw_subnet - set_fact: + - name: Set_fact rgw_subnet + ansible.builtin.set_fact: rgw_subnet: "--networks {{ radosgw_address_block }}" when: - radosgw_address_block is defined - radosgw_address_block != 'subnet' - - name: update the placement of radosgw hosts - command: > + - name: Update the placement of radosgw hosts + ansible.builtin.command: > {{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply rgw {{ ansible_facts['hostname'] }} --placement='count-per-host:{{ radosgw_num_instances }} {{ ansible_facts['nodename'] }}' @@ -1003,44 +1024,46 @@ environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: stop and remove legacy ceph rgw daemons +- name: Stop and remove legacy ceph rgw daemons hosts: "{{ rgw_group_name|default('rgws') }}" serial: 1 become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: set_radosgw_address.yml - - name: stop and disable ceph-radosgw systemd service - service: + - name: Stop and disable ceph-radosgw 
systemd service + ansible.builtin.service: name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" state: stopped enabled: false failed_when: false loop: '{{ rgw_instances }}' - - name: stop and disable ceph-radosgw systemd target - service: + - name: Stop and disable ceph-radosgw systemd target + ansible.builtin.service: name: ceph-radosgw.target state: stopped enabled: false failed_when: false - - name: reset failed ceph-radosgw systemd unit - command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa 303 + - name: Reset failed ceph-radosgw systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa command-instead-of-module changed_when: false failed_when: false loop: '{{ rgw_instances }}' when: containerized_deployment | bool - - name: remove ceph-radosgw systemd files - file: + - name: Remove ceph-radosgw systemd files + ansible.builtin.file: path: "{{ item }}" state: absent loop: @@ -1048,33 +1071,35 @@ - /etc/systemd/system/ceph-radosgw@.service.d - /etc/systemd/system/ceph-radosgw.target - - name: remove legacy ceph radosgw data - file: + - name: Remove legacy ceph radosgw data + ansible.builtin.file: path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" state: absent loop: '{{ rgw_instances }}' - - name: remove legacy ceph radosgw directory - file: + - name: Remove legacy ceph radosgw directory + ansible.builtin.file: path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}" state: absent -- name: stop and remove legacy ceph nfs daemons +- name: Stop and remove legacy ceph nfs daemons hosts: "{{ nfs_group_name|default('nfss') }}" tags: 'ceph_nfs_adopt' serial: 1 become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-nfs role + ansible.builtin.import_role: name: ceph-nfs tasks_from: create_rgw_nfs_user.yml - - name: enable ceph mgr nfs module + - name: Enable ceph mgr nfs module ceph_mgr_module: name: "nfs" cluster: "{{ cluster }}" @@ -1084,104 +1109,106 @@ CEPH_CONTAINER_BINARY: "{{ container_binary }}" delegate_to: "{{ groups[mon_group_name][0] }}" - - name: stop and disable ceph-nfs systemd service - service: + - name: Stop and disable ceph-nfs systemd service + ansible.builtin.service: name: "ceph-nfs@{{ ansible_facts['hostname'] }}" state: stopped enabled: false failed_when: false - - name: reset failed ceph-nfs systemd unit - command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}" # noqa 303 + - name: Reset failed ceph-nfs systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module changed_when: false failed_when: false when: containerized_deployment | bool - - name: remove ceph-nfs systemd unit files - file: + - name: Remove ceph-nfs systemd unit files + ansible.builtin.file: path: "{{ item }}" state: absent loop: - /etc/systemd/system/ceph-nfs@.service - /etc/systemd/system/ceph-nfs@.service.d - - name: remove legacy ceph radosgw directory - file: + - name: Remove legacy ceph radosgw directory + ansible.builtin.file: path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}" state: absent - - name: create nfs ganesha cluster - command: "{{ 
cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs cluster create {{ ansible_facts['hostname'] }} {{ ansible_facts['hostname'] }}" + - name: Create nfs ganesha cluster + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs cluster create {{ ansible_facts['hostname'] }} {{ ansible_facts['hostname'] }}" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: create cephfs export - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create cephfs {{ cephfs }} {{ ansible_facts['hostname'] }} {{ ceph_nfs_ceph_pseudo_path }} --squash {{ ceph_nfs_ceph_squash }}" + - name: Create cephfs export + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create cephfs {{ cephfs }} {{ ansible_facts['hostname'] }} {{ ceph_nfs_ceph_pseudo_path }} --squash {{ ceph_nfs_ceph_squash }}" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' when: nfs_file_gw | bool - - name: create rgw export - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create rgw --cluster-id {{ ansible_facts['hostname'] }} --pseudo-path {{ ceph_nfs_rgw_pseudo_path }} --user-id {{ ceph_nfs_rgw_user }} --squash {{ ceph_nfs_rgw_squash }}" + - name: Create rgw export + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create rgw --cluster-id {{ ansible_facts['hostname'] }} --pseudo-path {{ ceph_nfs_rgw_pseudo_path }} --user-id {{ ceph_nfs_rgw_user }} --squash {{ ceph_nfs_rgw_squash }}" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' when: nfs_obj_gw | bool -- name: redeploy rbd-mirror daemons +- name: Redeploy rbd-mirror daemons hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: update the placement of rbd-mirror hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply rbd-mirror --placement='{{ groups.get(rbdmirror_group_name, []) | length }} label:{{ rbdmirror_group_name }}'" + - name: Update the placement of rbd-mirror hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply rbd-mirror --placement='{{ groups.get(rbdmirror_group_name, []) | length }} label:{{ rbdmirror_group_name }}'" run_once: true changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: stop and remove legacy rbd-mirror daemons +- name: Stop and remove legacy rbd-mirror daemons hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" serial: 1 become: true gather_facts: false - 
any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: stop and disable rbd-mirror systemd service - service: + - name: Stop and disable rbd-mirror systemd service + ansible.builtin.service: name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" state: stopped enabled: false failed_when: false - - name: stop and disable rbd-mirror systemd target - service: + - name: Stop and disable rbd-mirror systemd target + ansible.builtin.service: name: ceph-rbd-mirror.target state: stopped enabled: false failed_when: false - - name: reset failed rbd-mirror systemd unit - command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" # noqa 303 + - name: Reset failed rbd-mirror systemd unit + ansible.builtin.command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module changed_when: false failed_when: false when: containerized_deployment | bool - - name: remove rbd-mirror systemd files - file: + - name: Remove rbd-mirror systemd files + ansible.builtin.file: path: "{{ item }}" state: absent loop: @@ -1190,7 +1217,7 @@ - /etc/systemd/system/ceph-rbd-mirror.target -- name: redeploy ceph-crash daemons +- name: Redeploy ceph-crash daemons hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" @@ -1200,25 +1227,26 @@ - "{{ rbdmirror_group_name|default('rbdmirrors') }}" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: stop and disable ceph-crash systemd service - service: + - name: Stop and disable ceph-crash systemd service + ansible.builtin.service: name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" state: stopped enabled: false failed_when: false - - name: remove ceph-crash systemd unit file - file: + - name: Remove ceph-crash systemd unit file + ansible.builtin.file: path: /etc/systemd/system/ceph-crash@.service state: absent - - name: update the placement of ceph-crash hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply crash --placement='label:ceph'" + - name: Update the placement of ceph-crash hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply crash --placement='label:ceph'" run_once: true changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' @@ -1226,31 +1254,32 @@ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: redeploy alertmanager/grafana/prometheus daemons +- name: Redeploy alertmanager/grafana/prometheus daemons hosts: "{{ monitoring_group_name|default('monitoring') }}" serial: 1 become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: check whether a ceph config file is present - stat: + - name: Check whether a ceph config file is present + ansible.builtin.stat: path: "/etc/ceph/{{ cluster }}.conf" register: ceph_config - - name: ensure /etc/ceph is present - file: + - name: Ensure /etc/ceph is present + ansible.builtin.file: path: /etc/ceph state: 
directory owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" mode: "{{ ceph_directories_mode }}" - - name: write a ceph.conf with minimal config - copy: + - name: Write a ceph.conf with minimal config + ansible.builtin.copy: dest: "/etc/ceph/{{ cluster }}.conf" content: "{{ minimal_config.stdout }}" owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}" @@ -1258,42 +1287,43 @@ mode: "{{ ceph_keyring_permissions }}" when: not ceph_config.stat.exists | bool - - name: with dashboard enabled + - name: With dashboard enabled when: dashboard_enabled | bool block: - - name: ensure alertmanager/prometheus data directories are present - file: + - name: Ensure alertmanager/prometheus data directories are present + ansible.builtin.file: path: "{{ item }}" state: directory owner: "{{ prometheus_user_id }}" group: "{{ prometheus_user_id }}" + mode: "0755" with_items: - - "{{ alertmanager_data_dir }}" - - "{{ prometheus_data_dir }}" + - "{{ alertmanager_data_dir }}" + - "{{ prometheus_data_dir }}" # (workaround) cephadm adopt alertmanager only stops prometheus-alertmanager systemd service - - name: stop and disable alertmanager systemd unit - service: + - name: Stop and disable alertmanager systemd unit + ansible.builtin.service: name: alertmanager state: stopped enabled: false failed_when: false # (workaround) cephadm adopt alertmanager only uses /etc/prometheus/alertmanager.yml - - name: create alertmanager config symlink - file: + - name: Create alertmanager config symlink + ansible.builtin.file: path: /etc/prometheus/alertmanager.yml src: '{{ alertmanager_conf_dir }}/alertmanager.yml' state: link # (workaround) cephadm adopt alertmanager only uses /var/lib/prometheus/alertmanager/ - - name: create alertmanager data symlink - file: + - name: Create alertmanager data symlink + ansible.builtin.file: path: '{{ prometheus_data_dir }}/alertmanager' src: '{{ alertmanager_data_dir }}' state: link - - name: adopt alertmanager daemon + - name: Adopt alertmanager daemon cephadm_adopt: name: "alertmanager.{{ ansible_facts['hostname'] }}" cluster: "{{ cluster }}" @@ -1302,52 +1332,54 @@ pull: false firewalld: "{{ true if configure_firewall | bool else false }}" - - name: remove alertmanager systemd unit file - file: + - name: Remove alertmanager systemd unit file + ansible.builtin.file: path: /etc/systemd/system/alertmanager.service state: absent - - name: remove the legacy alertmanager data - file: + - name: Remove the legacy alertmanager data + ansible.builtin.file: path: '{{ alertmanager_data_dir }}' state: absent - - name: stop and disable prometheus systemd unit - service: + - name: Stop and disable prometheus systemd unit + ansible.builtin.service: name: prometheus state: stopped enabled: false failed_when: false - - name: remove alertmanager data symlink - file: + - name: Remove alertmanager data symlink + ansible.builtin.file: path: '{{ prometheus_data_dir }}/alertmanager' state: absent # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/ - - name: tmp copy the prometheus data - copy: + - name: Tmp copy the prometheus data + ansible.builtin.copy: src: '{{ prometheus_data_dir }}/' dest: /var/lib/prom_metrics owner: 65534 group: 65534 + mode: preserve remote_src: true # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/ - - name: restore the prometheus data - copy: + - name: Restore the prometheus data + ansible.builtin.copy: src: 
/var/lib/prom_metrics/ dest: /var/lib/prometheus/metrics owner: 65534 group: 65534 + mode: preserve remote_src: true - - name: remove the tmp prometheus data copy - file: + - name: Remove the tmp prometheus data copy + ansible.builtin.file: path: /var/lib/prom_metrics state: absent - - name: adopt prometheus daemon + - name: Adopt prometheus daemon cephadm_adopt: name: "prometheus.{{ ansible_facts['hostname'] }}" cluster: "{{ cluster }}" @@ -1356,25 +1388,25 @@ pull: false firewalld: "{{ true if configure_firewall | bool else false }}" - - name: remove prometheus systemd unit file - file: + - name: Remove prometheus systemd unit file + ansible.builtin.file: path: /etc/systemd/system/prometheus.service state: absent - - name: remove the legacy prometheus data - file: + - name: Remove the legacy prometheus data + ansible.builtin.file: path: '{{ prometheus_data_dir }}' state: absent # (workaround) cephadm adopt grafana only stops grafana systemd service - - name: stop and disable grafana systemd unit - service: + - name: Stop and disable grafana systemd unit + ansible.builtin.service: name: grafana-server state: stopped enabled: false failed_when: false - - name: adopt grafana daemon + - name: Adopt grafana daemon cephadm_adopt: name: "grafana.{{ ansible_facts['hostname'] }}" cluster: "{{ cluster }}" @@ -1383,17 +1415,17 @@ pull: false firewalld: "{{ true if configure_firewall | bool else false }}" - - name: remove grafana systemd unit file - file: + - name: Remove grafana systemd unit file + ansible.builtin.file: path: /etc/systemd/system/grafana-server.service state: absent - - name: remove the legacy grafana data - file: + - name: Remove the legacy grafana data + ansible.builtin.file: path: /var/lib/grafana state: absent -- name: redeploy node-exporter daemons +- name: Redeploy node-exporter daemons hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" @@ -1406,28 +1438,29 @@ - "{{ monitoring_group_name|default('monitoring') }}" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: with dashboard enabled + - name: With dashboard enabled when: dashboard_enabled | bool block: - - name: stop and disable node-exporter systemd service - service: + - name: Stop and disable node-exporter systemd service + ansible.builtin.service: name: node_exporter state: stopped enabled: false failed_when: false - - name: remove node_exporter systemd unit file - file: + - name: Remove node_exporter systemd unit file + ansible.builtin.file: path: /etc/systemd/system/node_exporter.service state: absent - - name: update the placement of node-exporter hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply node-exporter --placement='*'" + - name: Update the placement of node-exporter hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply node-exporter --placement='*'" run_once: true changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' @@ -1435,79 +1468,81 @@ CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: adjust placement daemons +- name: Adjust placement daemons hosts: "{{ mon_group_name|default('mons') }}[0]" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - 
import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: update the placement of monitor hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mon --placement='{{ groups.get(mon_group_name, []) | length }} label:{{ mon_group_name }}'" + - name: Update the placement of monitor hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mon --placement='{{ groups.get(mon_group_name, []) | length }} label:{{ mon_group_name }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: set_fact mgr_placement - set_fact: + - name: Set_fact mgr_placement + ansible.builtin.set_fact: mgr_placement_count: "{{ groups.get(mgr_group_name, []) | length if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name, []) | length }}" - - name: set_fact mgr_placement_label - set_fact: + - name: Set_fact mgr_placement_label + ansible.builtin.set_fact: mgr_placement_label: "{{ mgr_group_name if groups.get(mgr_group_name, []) | length > 0 else mon_group_name }}" - - name: update the placement of manager hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mgr --placement='{{ mgr_placement_count }} label:{{ mgr_placement_label }}'" + - name: Update the placement of manager hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mgr --placement='{{ mgr_placement_count }} label:{{ mgr_placement_label }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: with dashboard enabled + - name: With dashboard enabled when: dashboard_enabled | bool block: - - name: update the placement of alertmanager hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply alertmanager --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'" + - name: Update the placement of alertmanager hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply alertmanager --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: update the placement of grafana hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply grafana --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'" + - name: Update the placement of grafana hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply grafana --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: update the placement of prometheus hosts - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster 
}}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply prometheus --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'" + - name: Update the placement of prometheus hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply prometheus --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: show ceph orchestrator status +- name: Show ceph orchestrator status hosts: "{{ mon_group_name|default('mons') }}[0]" become: true gather_facts: false - any_errors_fatal: True + any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: show ceph orchestrator services - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ls --refresh" + - name: Show ceph orchestrator services + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ls --refresh" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: show ceph orchestrator daemons - command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ps --refresh" + - name: Show ceph orchestrator daemons + ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ps --refresh" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: inform users about cephadm - debug: + - name: Inform users about cephadm + ansible.builtin.debug: msg: | This Ceph cluster is now managed by cephadm. Any new changes to the cluster need to be achieved by using the cephadm CLI and you don't diff --git a/infrastructure-playbooks/cephadm.yml b/infrastructure-playbooks/cephadm.yml index 01a6ce2bd1..6a8612a615 100644 --- a/infrastructure-playbooks/cephadm.yml +++ b/infrastructure-playbooks/cephadm.yml @@ -1,5 +1,5 @@ --- -- name: gather facts and prepare system for cephadm +- name: Gather facts and prepare system for cephadm hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" @@ -15,23 +15,24 @@ vars: delegate_facts_host: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: validate if monitor group doesn't exist or empty - fail: + - name: Validate if monitor group doesn't exist or empty + ansible.builtin.fail: msg: "you must add a [mons] group and add at least one node." run_once: true when: groups[mon_group_name] is undefined or groups[mon_group_name] | length == 0 - - name: validate if manager group doesn't exist or empty - fail: + - name: Validate if manager group doesn't exist or empty + ansible.builtin.fail: msg: "you must add a [mgrs] group and add at least one node." 
run_once: true when: groups[mgr_group_name] is undefined or groups[mgr_group_name] | length == 0 - - name: validate monitor network configuration - fail: + - name: Validate monitor network configuration + ansible.builtin.fail: msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided" when: - mon_group_name in group_names @@ -39,38 +40,38 @@ - monitor_address_block == 'subnet' - monitor_interface == 'interface' - - name: validate dashboard configuration + - name: Validate dashboard configuration when: dashboard_enabled | bool run_once: true block: - - name: fail if [monitoring] group doesn't exist or empty - fail: + - name: Fail if [monitoring] group doesn't exist or empty + ansible.builtin.fail: msg: "you must add a [monitoring] group and add at least one node." when: groups[monitoring_group_name] is undefined or groups[monitoring_group_name] | length == 0 - - name: fail when dashboard_admin_password is not set - fail: + - name: Fail when dashboard_admin_password is not set + ansible.builtin.fail: msg: "you must set dashboard_admin_password." when: dashboard_admin_password is undefined - - name: validate container registry credentials - fail: + - name: Validate container registry credentials + ansible.builtin.fail: msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set' when: - ceph_docker_registry_auth | bool - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or (ceph_docker_registry_username | length == 0 or ceph_docker_registry_password | length == 0) - - name: gather facts - setup: + - name: Gather facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' when: not delegate_facts_host | bool - - name: gather and delegate facts - setup: + - name: Gather and delegate facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' @@ -81,76 +82,82 @@ run_once: true when: delegate_facts_host | bool - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - name: check if it is atomic host - stat: + - name: Check if it is atomic host + ansible.builtin.stat: path: /run/ostree-booted register: stat_ostree - - name: set_fact is_atomic - set_fact: + - name: Set_fact is_atomic + ansible.builtin.set_fact: is_atomic: "{{ stat_ostree.stat.exists }}" - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common tasks_from: registry.yml when: ceph_docker_registry_auth | bool - - name: configure repository for installing cephadm + - name: Configure repository for installing cephadm vars: ceph_origin: repository ceph_repository: community block: - - name: validate repository variables - import_role: + - name: Validate repository variables + ansible.builtin.import_role: name: ceph-validate tasks_from: check_repository.yml - - name: configure repository - import_role: + - name: Configure repository + ansible.builtin.import_role: name: ceph-common tasks_from: "configure_repository.yml" - - name: install cephadm requirements - package: + - name: Install cephadm requirements + ansible.builtin.package: name: ['python3', 'lvm2'] register: result until: result is succeeded - - name: install cephadm - package: + - name: Install cephadm + ansible.builtin.package: name: cephadm register: result until: result is succeeded - - 
name: set_fact cephadm_cmd - set_fact: + - name: Set_fact cephadm_cmd + ansible.builtin.set_fact: cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}" -- name: bootstrap the cluster +- name: Bootstrap the cluster hosts: "{{ mon_group_name|default('mons') }}[0]" become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: set_monitor_address.yml - - name: create /etc/ceph directory - file: + - name: Create /etc/ceph directory + ansible.builtin.file: path: /etc/ceph state: directory + mode: "0755" - - name: bootstrap the new cluster + - name: Bootstrap the new cluster cephadm_bootstrap: mon_ip: "{{ _current_monitor_address }}" image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" @@ -164,46 +171,46 @@ ssh_user: "{{ cephadm_ssh_user | default('root') }}" ssh_config: "{{ cephadm_ssh_config | default(omit) }}" - - name: set default container image in ceph configuration - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + - name: Set default container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: set container image base in ceph configuration - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}" + - name: Set container image base in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: set dashboard container image in ceph mgr configuration + - name: Set dashboard container image in ceph mgr configuration when: dashboard_enabled | bool block: - - name: set alertmanager container image in ceph configuration - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}" + - name: Set alertmanager container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: set grafana container image in ceph configuration - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}" + - name: Set grafana container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}" changed_when: false environment: CEPHADM_IMAGE: '{{ 
ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: set node-exporter container image in ceph configuration - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}" + - name: Set node-exporter container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: set prometheus container image in ceph configuration - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}" + - name: Set prometheus container image in ceph configuration + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: add the other nodes +- name: Add the other nodes hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" @@ -217,11 +224,12 @@ become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: get the cephadm ssh pub key - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key" + - name: Get the cephadm ssh pub key + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key" changed_when: false run_once: true register: cephadm_pubpkey @@ -229,35 +237,35 @@ environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: allow cephadm key for {{ cephadm_ssh_user | default('root') }} account - authorized_key: + - name: Allow cephadm key + ansible.posix.authorized_key: user: "{{ cephadm_ssh_user | default('root') }}" key: '{{ cephadm_pubpkey.stdout }}' - - name: run cephadm prepare-host - command: cephadm prepare-host + - name: Run cephadm prepare-host + ansible.builtin.command: cephadm prepare-host changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: manage nodes with cephadm - ipv4 - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}" + - name: Manage nodes with cephadm - ipv4 + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' when: ip_version == 'ipv4' - - name: manage nodes with cephadm - ipv6 - command: "{{ 
cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}" + - name: Manage nodes with cephadm - ipv6 + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' when: ip_version == 'ipv6' - - name: add ceph label for core component - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph" + - name: Add ceph label for core component + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph" changed_when: false delegate_to: '{{ groups[mon_group_name][0] }}' when: inventory_hostname in groups.get(mon_group_name, []) or @@ -269,22 +277,23 @@ environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: adjust service placement +- name: Adjust service placement hosts: "{{ mon_group_name|default('mons') }}[0]" become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: update the placement of monitor hosts - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'" + - name: Update the placement of monitor hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: waiting for the monitor to join the quorum... - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json" + - name: Waiting for the monitor to join the quorum... 
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json" changed_when: false register: ceph_health_raw until: (ceph_health_raw.stdout | from_json)["quorum_names"] | length == groups.get(mon_group_name, []) | length @@ -293,83 +302,85 @@ environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: update the placement of manager hosts - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'" + - name: Update the placement of manager hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: update the placement of crash hosts - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'" + - name: Update the placement of crash hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: adjust monitoring service placement +- name: Adjust monitoring service placement hosts: "{{ monitoring_group_name|default('monitoring') }}" become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - name: with dashboard enabled + - name: With dashboard enabled when: dashboard_enabled | bool delegate_to: '{{ groups[mon_group_name][0] }}' run_once: true block: - - name: enable the prometheus module - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus" + - name: Enable the prometheus module + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: update the placement of alertmanager hosts - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'" + - name: Update the placement of alertmanager hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: update the placement of grafana hosts - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'" + - name: Update the placement of grafana hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: update the placement of prometheus hosts - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'" + - name: Update the placement of prometheus hosts + ansible.builtin.command: "{{ cephadm_cmd 
}} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: update the placement of node-exporter hosts - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'" + - name: Update the placement of node-exporter hosts + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' -- name: print information +- name: Print information hosts: "{{ mon_group_name|default('mons') }}[0]" become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - name: show ceph orchestrator services - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh" + - name: Show ceph orchestrator services + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: show ceph orchestrator daemons - command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh" + - name: Show ceph orchestrator daemons + ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh" changed_when: false environment: CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}' - - name: inform users about cephadm - debug: + - name: Inform users about cephadm + ansible.builtin.debug: msg: | This Ceph cluster is now ready to receive more configuration like adding OSD, MDS daemons, create pools or keyring. diff --git a/infrastructure-playbooks/docker-to-podman.yml b/infrastructure-playbooks/docker-to-podman.yml index 9cbd12e343..0367da1403 100644 --- a/infrastructure-playbooks/docker-to-podman.yml +++ b/infrastructure-playbooks/docker-to-podman.yml @@ -5,54 +5,58 @@ # It is *not* intended to restart services since we don't want to multiple services # restarts. 
-- hosts: - - mons - - osds - - mdss - - rgws - - nfss - - rbdmirrors - - clients - - iscsigws - - mgrs - - monitoring +- name: Pre-requisite and facts gathering + hosts: + - mons + - osds + - mdss + - rgws + - nfss + - rbdmirrors + - clients + - iscsigws + - mgrs + - monitoring gather_facts: false - become: True + become: true any_errors_fatal: true vars: - delegate_facts_host: True + delegate_facts_host: true pre_tasks: - - import_tasks: "{{ playbook_dir }}/../raw_install_python.yml" + - name: Import raw_install_python tasks + ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml" tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults # pre-tasks for following import - - - name: gather facts - setup: + - name: Gather facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) - - name: gather and delegate facts - setup: + - name: Gather and delegate facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' delegate_to: "{{ item }}" - delegate_facts: True + delegate_facts: true with_items: "{{ groups['all'] | difference(groups.get(client_group_name | default('clients'), [])) }}" run_once: true when: delegate_facts_host | bool -- hosts: +- name: Migrate to podman + hosts: - "{{ mon_group_name | default('mons') }}" - "{{ osd_group_name | default('osds') }}" - "{{ mds_group_name | default('mdss') }}" @@ -65,20 +69,25 @@ gather_facts: false become: true tasks: - - name: set_fact docker2podman and container_binary - set_fact: - docker2podman: True + - name: Set_fact docker2podman and container_binary + ansible.builtin.set_fact: + docker2podman: true container_binary: podman - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - name: install podman - package: + - name: Install podman + ansible.builtin.package: name: podman state: present register: result @@ -86,17 +95,17 @@ tags: with_pkg when: not is_atomic | bool - - name: check podman presence # noqa : 305 - shell: command -v podman + - name: Check podman presence # noqa command-instead-of-shell + ansible.builtin.shell: command -v podman register: podman_presence changed_when: false failed_when: false - - name: pulling images from docker daemon + - name: Pulling images from docker daemon when: podman_presence.rc == 0 block: - - name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image from docker daemon" - command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + - name: Pulling Ceph container image from docker daemon + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" changed_when: false register: pull_image until: pull_image.rc == 0 @@ -111,8 +120,8 @@ inventory_hostname in groups.get(iscsi_gw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, []) - - name: "pulling alertmanager/grafana/prometheus images from docker daemon" - command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}" 
+ - name: Pulling alertmanager/grafana/prometheus images from docker daemon + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}" changed_when: false register: pull_image until: pull_image.rc == 0 @@ -126,8 +135,8 @@ - dashboard_enabled | bool - inventory_hostname in groups.get(monitoring_group_name, []) - - name: "pulling {{ node_exporter_container_image }} image from docker daemon" - command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}" + - name: Pulling node_exporter image from docker daemon + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}" changed_when: false register: pull_image until: pull_image.rc == 0 @@ -135,47 +144,56 @@ delay: 10 when: dashboard_enabled | bool - - import_role: + - name: Import ceph-mon role + ansible.builtin.import_role: name: ceph-mon tasks_from: systemd.yml when: inventory_hostname in groups.get(mon_group_name, []) - - import_role: + - name: Import ceph-iscsi-gw role + ansible.builtin.import_role: name: ceph-iscsi-gw tasks_from: systemd.yml when: inventory_hostname in groups.get(iscsi_gw_group_name, []) - - import_role: + - name: Import ceph-mds role + ansible.builtin.import_role: name: ceph-mds tasks_from: systemd.yml when: inventory_hostname in groups.get(mds_group_name, []) - - import_role: + - name: Import ceph-mgr role + ansible.builtin.import_role: name: ceph-mgr tasks_from: systemd.yml when: inventory_hostname in groups.get(mgr_group_name, []) - - import_role: + - name: Import ceph-nfs role + ansible.builtin.import_role: name: ceph-nfs tasks_from: systemd.yml when: inventory_hostname in groups.get(nfs_group_name, []) - - import_role: + - name: Import ceph-osd role + ansible.builtin.import_role: name: ceph-osd tasks_from: systemd.yml when: inventory_hostname in groups.get(osd_group_name, []) - - import_role: + - name: Import ceph-rbd-mirror role + ansible.builtin.import_role: name: ceph-rbd-mirror tasks_from: systemd.yml when: inventory_hostname in groups.get(rbdmirror_group_name, []) - - import_role: + - name: Import ceph-rgw role + ansible.builtin.import_role: name: ceph-rgw tasks_from: systemd.yml when: inventory_hostname in groups.get(rgw_group_name, []) - - import_role: + - name: Import ceph-crash role + ansible.builtin.import_role: name: ceph-crash tasks_from: systemd.yml when: inventory_hostname in groups.get(mon_group_name, []) or @@ -185,28 +203,32 @@ inventory_hostname in groups.get(mgr_group_name, []) or inventory_hostname in groups.get(rbdmirror_group_name, []) - - name: dashboard configuration + - name: Dashboard configuration when: dashboard_enabled | bool block: - - import_role: + - name: Import ceph-node-exporter role + ansible.builtin.import_role: name: ceph-node-exporter tasks_from: systemd.yml - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: grafana.yml when: inventory_hostname in groups.get(monitoring_group_name, []) - - import_role: + - name: Import ceph-grafana role + ansible.builtin.import_role: name: ceph-grafana tasks_from: systemd.yml when: inventory_hostname in groups.get(monitoring_group_name, []) - - import_role: + - name: Import ceph-prometheus role + ansible.builtin.import_role: name: ceph-prometheus tasks_from: systemd.yml when: inventory_hostname in groups.get(monitoring_group_name, []) - - name: reload systemd daemon - systemd: - daemon_reload: yes \ No newline at end of file + 
- name: Reload systemd daemon + ansible.builtin.systemd: + daemon_reload: true diff --git a/infrastructure-playbooks/gather-ceph-logs.yml b/infrastructure-playbooks/gather-ceph-logs.yml index 175645d5b4..9efcf97e87 100644 --- a/infrastructure-playbooks/gather-ceph-logs.yml +++ b/infrastructure-playbooks/gather-ceph-logs.yml @@ -1,20 +1,22 @@ -- hosts: - - mons - - osds - - mdss - - rgws - - nfss - - rbdmirrors - - clients - - mgrs - - iscsigws +--- +- name: Gather ceph logs + hosts: + - mons + - osds + - mdss + - rgws + - nfss + - rbdmirrors + - clients + - mgrs + - iscsigws gather_facts: false - become: yes + become: true tasks: - - name: create a temp directory - tempfile: + - name: Create a temp directory + ansible.builtin.tempfile: state: directory prefix: ceph_ansible run_once: true @@ -22,17 +24,17 @@ become: false delegate_to: localhost - - name: set_fact lookup_ceph_config - lookup keys, conf and logs - find: + - name: Set_fact lookup_ceph_config - lookup keys, conf and logs + ansible.builtin.find: paths: - /etc/ceph - /var/log/ceph register: ceph_collect - - name: collect ceph logs, config and keys in "{{ localtempfile.path }}" on the machine running ansible - fetch: + - name: Collect ceph logs, config and keys on the machine running ansible + ansible.builtin.fetch: src: "{{ item.path }}" dest: "{{ localtempfile.path }}" - fail_on_missing: no - flat: no + fail_on_missing: false + flat: false with_items: "{{ ceph_collect.files }}" diff --git a/infrastructure-playbooks/lv-create.yml b/infrastructure-playbooks/lv-create.yml index 220a8cb349..f504bc4848 100644 --- a/infrastructure-playbooks/lv-create.yml +++ b/infrastructure-playbooks/lv-create.yml @@ -1,4 +1,5 @@ -- name: creates logical volumes for the bucket index or fs journals on a single device. +--- +- name: Creates logical volumes for the bucket index or fs journals on a single device. 
become: true hosts: osds @@ -21,78 +22,79 @@ tasks: - - name: include vars of lv_vars.yaml - include_vars: - file: lv_vars.yaml # noqa 505 - failed_when: false + - name: Include vars of lv_vars.yaml + ansible.builtin.include_vars: + file: lv_vars.yaml # noqa missing-import + failed_when: false - # ensure nvme_device is set - - name: fail if nvme_device is not defined - fail: - msg: "nvme_device has not been set by the user" - when: nvme_device is undefined or nvme_device == 'dummy' + # ensure nvme_device is set + - name: Fail if nvme_device is not defined + ansible.builtin.fail: + msg: "nvme_device has not been set by the user" + when: nvme_device is undefined or nvme_device == 'dummy' - # need to check if lvm2 is installed - - name: install lvm2 - package: - name: lvm2 - state: present - register: result - until: result is succeeded + # need to check if lvm2 is installed + - name: Install lvm2 + ansible.builtin.package: + name: lvm2 + state: present + register: result + until: result is succeeded - # Make entire nvme device a VG - - name: add nvme device as lvm pv - lvg: - force: yes - pvs: "{{ nvme_device }}" - pesize: 4 - state: present - vg: "{{ nvme_vg_name }}" + # Make entire nvme device a VG + - name: Add nvme device as lvm pv + community.general.lvg: + force: true + pvs: "{{ nvme_device }}" + pesize: 4 + state: present + vg: "{{ nvme_vg_name }}" - - name: create lvs for fs journals for the bucket index on the nvme device - lvol: - lv: "{{ item.journal_name }}" - vg: "{{ nvme_vg_name }}" - size: "{{ journal_size }}" - pvs: "{{ nvme_device }}" - with_items: "{{ nvme_device_lvs }}" + - name: Create lvs for fs journals for the bucket index on the nvme device + community.general.lvol: + lv: "{{ item.journal_name }}" + vg: "{{ nvme_vg_name }}" + size: "{{ journal_size }}" + pvs: "{{ nvme_device }}" + with_items: "{{ nvme_device_lvs }}" - - name: create lvs for fs journals for hdd devices - lvol: - lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}" - vg: "{{ nvme_vg_name }}" - size: "{{ journal_size }}" - with_items: "{{ hdd_devices }}" + - name: Create lvs for fs journals for hdd devices + community.general.lvol: + lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}" + vg: "{{ nvme_vg_name }}" + size: "{{ journal_size }}" + with_items: "{{ hdd_devices }}" - - name: create the lv for data portion of the bucket index on the nvme device - lvol: - lv: "{{ item.lv_name }}" - vg: "{{ nvme_vg_name }}" - size: "{{ item.size }}" - pvs: "{{ nvme_device }}" - with_items: "{{ nvme_device_lvs }}" + - name: Create the lv for data portion of the bucket index on the nvme device + community.general.lvol: + lv: "{{ item.lv_name }}" + vg: "{{ nvme_vg_name }}" + size: "{{ item.size }}" + pvs: "{{ nvme_device }}" + with_items: "{{ nvme_device_lvs }}" - # Make sure all hdd devices have a unique volume group - - name: create vgs for all hdd devices - lvg: - force: yes - pvs: "{{ item }}" - pesize: 4 - state: present - vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" - with_items: "{{ hdd_devices }}" + # Make sure all hdd devices have a unique volume group + - name: Create vgs for all hdd devices + community.general.lvg: + force: true + pvs: "{{ item }}" + pesize: 4 + state: present + vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" + with_items: "{{ hdd_devices }}" - - name: create lvs for the data portion on hdd devices - lvol: - lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}" - vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" - size: "{{ hdd_lv_size }}" - pvs: "{{ item }}" - 
with_items: "{{ hdd_devices }}" + - name: Create lvs for the data portion on hdd devices + community.general.lvol: + lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}" + vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" + size: "{{ hdd_lv_size }}" + pvs: "{{ item }}" + with_items: "{{ hdd_devices }}" - - name: "write output for osds.yml to {{ logfile_path }}" - become: false - copy: - content: "{{ logfile }}" - dest: "{{ logfile_path }}" - delegate_to: localhost + - name: Write output for osds.yml + become: false + ansible.builtin.copy: + content: "{{ logfile }}" + dest: "{{ logfile_path }}" + mode: preserve + delegate_to: localhost diff --git a/infrastructure-playbooks/lv-teardown.yml b/infrastructure-playbooks/lv-teardown.yml index 4d0654c38c..290f71b353 100644 --- a/infrastructure-playbooks/lv-teardown.yml +++ b/infrastructure-playbooks/lv-teardown.yml @@ -1,108 +1,109 @@ -- name: tear down existing osd filesystems then logical volumes, volume groups, and physical volumes +--- +- name: Tear down existing osd filesystems then logical volumes, volume groups, and physical volumes become: true hosts: osds vars_prompt: - - name: ireallymeanit + - name: Ireallymeanit prompt: Are you sure you want to tear down the logical volumes? default: 'no' - private: no + private: false tasks: - - name: exit playbook, if user did not mean to tear down logical volumes - fail: - msg: > - "Exiting lv-teardown playbook, logical volumes were NOT torn down. - To tear down the logical volumes, either say 'yes' on the prompt or - or use `-e ireallymeanit=yes` on the command line when - invoking the playbook" - when: ireallymeanit != 'yes' + - name: Exit playbook, if user did not mean to tear down logical volumes + ansible.builtin.fail: + msg: > + "Exiting lv-teardown playbook, logical volumes were NOT torn down. 
+ To tear down the logical volumes, either say 'yes' on the prompt or + or use `-e ireallymeanit=yes` on the command line when + invoking the playbook" + when: ireallymeanit != 'yes' - - name: include vars of lv_vars.yaml - include_vars: - file: lv_vars.yaml # noqa 505 - failed_when: false + - name: Include vars of lv_vars.yaml + ansible.builtin.include_vars: + file: lv_vars.yaml # noqa missing-import + failed_when: false - # need to check if lvm2 is installed - - name: install lvm2 - package: - name: lvm2 - state: present - register: result - until: result is succeeded + # need to check if lvm2 is installed + - name: Install lvm2 + ansible.builtin.package: + name: lvm2 + state: present + register: result + until: result is succeeded # BEGIN TEARDOWN - - name: find any existing osd filesystems - shell: | - set -o pipefail; - grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}' - register: old_osd_filesystems - changed_when: false + - name: Find any existing osd filesystems + ansible.builtin.shell: | + set -o pipefail; + grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}' + register: old_osd_filesystems + changed_when: false - - name: tear down any existing osd filesystem - ansible.posix.mount: - path: "{{ item }}" - state: unmounted - with_items: "{{ old_osd_filesystems.stdout_lines }}" + - name: Tear down any existing osd filesystem + ansible.posix.mount: + path: "{{ item }}" + state: unmounted + with_items: "{{ old_osd_filesystems.stdout_lines }}" - - name: kill all lvm commands that may have been hung - command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n" - failed_when: false - changed_when: false + - name: Kill all lvm commands that may have been hung + ansible.builtin.command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n" + failed_when: false + changed_when: false - ## Logcal Vols - - name: tear down existing lv for bucket index - lvol: - lv: "{{ item.lv_name }}" - vg: "{{ nvme_vg_name }}" - state: absent - force: yes - with_items: "{{ nvme_device_lvs }}" + ## Logcal Vols + - name: Tear down existing lv for bucket index + community.general.lvol: + lv: "{{ item.lv_name }}" + vg: "{{ nvme_vg_name }}" + state: absent + force: true + with_items: "{{ nvme_device_lvs }}" - - name: tear down any existing hdd data lvs - lvol: - lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}" - vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" - state: absent - force: yes - with_items: "{{ hdd_devices }}" + - name: Tear down any existing hdd data lvs + community.general.lvol: + lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}" + vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" + state: absent + force: true + with_items: "{{ hdd_devices }}" - - name: tear down any existing lv of journal for bucket index - lvol: - lv: "{{ item.journal_name }}" - vg: "{{ nvme_vg_name }}" - state: absent - force: yes - with_items: "{{ nvme_device_lvs }}" + - name: Tear down any existing lv of journal for bucket index + community.general.lvol: + lv: "{{ item.journal_name }}" + vg: "{{ nvme_vg_name }}" + state: absent + force: true + with_items: "{{ nvme_device_lvs }}" - - name: tear down any existing lvs of hdd journals - lvol: - lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}" - vg: "{{ nvme_vg_name }}" - state: absent - force: yes - with_items: "{{ hdd_devices }}" + - name: Tear down any existing lvs of hdd journals + community.general.lvol: + lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}" + vg: "{{ nvme_vg_name }}" + state: absent + force: true + with_items: "{{ 
hdd_devices }}" - ## Volume Groups - - name: remove vg on nvme device - lvg: - vg: "{{ nvme_vg_name }}" - state: absent - force: yes + ## Volume Groups + - name: Remove vg on nvme device + community.general.lvg: + vg: "{{ nvme_vg_name }}" + state: absent + force: true - - name: remove vg for each hdd device - lvg: - vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" - state: absent - force: yes - with_items: "{{ hdd_devices }}" + - name: Remove vg for each hdd device + community.general.lvg: + vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}" + state: absent + force: true + with_items: "{{ hdd_devices }}" - ## Physical Vols - - name: tear down pv for nvme device - command: "pvremove --force --yes {{ nvme_device }}" - changed_when: false + ## Physical Vols + - name: Tear down pv for nvme device + ansible.builtin.command: "pvremove --force --yes {{ nvme_device }}" + changed_when: false - - name: tear down pv for each hdd device - command: "pvremove --force --yes {{ item }}" - changed_when: false - with_items: "{{ hdd_devices }}" + - name: Tear down pv for each hdd device + ansible.builtin.command: "pvremove --force --yes {{ item }}" + changed_when: false + with_items: "{{ hdd_devices }}" diff --git a/infrastructure-playbooks/purge-cluster.yml b/infrastructure-playbooks/purge-cluster.yml index 747ecbc912..fd372c9b5e 100644 --- a/infrastructure-playbooks/purge-cluster.yml +++ b/infrastructure-playbooks/purge-cluster.yml @@ -11,17 +11,17 @@ # Overrides the prompt using -e option. Can be used in # automation scripts to avoid interactive prompt. -- name: confirm whether user really meant to purge the cluster +- name: Confirm whether user really meant to purge the cluster hosts: localhost gather_facts: false vars_prompt: - - name: ireallymeanit + - name: Ireallymeanit prompt: Are you sure you want to purge the cluster? default: 'no' - private: no + private: false tasks: - - name: exit playbook, if user did not mean to purge cluster - fail: + - name: Exit playbook, if user did not mean to purge cluster + ansible.builtin.fail: msg: > "Exiting purge-cluster playbook, cluster was NOT purged. 
To purge the cluster, either say 'yes' on the prompt or @@ -30,7 +30,7 @@ when: ireallymeanit != 'yes' -- name: gather facts on all hosts +- name: Gather facts on all hosts hosts: - mons - osds @@ -43,63 +43,67 @@ - monitoring become: true tasks: - - debug: + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: msg: "gather facts on all Ceph hosts for following reference" -- name: check there's no ceph kernel threads present +- name: Check there's no ceph kernel threads present hosts: clients become: true gather_facts: false any_errors_fatal: true tasks: - - import_role: + - name: Import ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - block: - - name: get nfs nodes ansible facts - setup: + - name: Nfs related tasks + when: groups[nfs_group_name] | default([]) | length > 0 + block: + - name: Get nfs nodes ansible facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' delegate_to: "{{ item }}" - delegate_facts: True + delegate_facts: true with_items: "{{ groups[nfs_group_name] }}" run_once: true - - name: get all nfs-ganesha mount points - command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts + - name: Get all nfs-ganesha mount points + ansible.builtin.command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts register: nfs_ganesha_mount_points failed_when: false + changed_when: false with_items: "{{ groups[nfs_group_name] }}" - - name: ensure nfs-ganesha mountpoint(s) are unmounted + - name: Ensure nfs-ganesha mountpoint(s) are unmounted ansible.posix.mount: path: "{{ item.split(' ')[1] }}" state: unmounted with_items: - "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}" when: item | length > 0 - when: groups[nfs_group_name] | default([]) | length > 0 - - name: ensure cephfs mountpoint(s) are unmounted - command: umount -a -t ceph + - name: Ensure cephfs mountpoint(s) are unmounted + ansible.builtin.command: umount -a -t ceph changed_when: false - - name: find mapped rbd ids - find: + - name: Find mapped rbd ids + ansible.builtin.find: paths: /sys/bus/rbd/devices file_type: any register: rbd_mapped_ids - - name: use sysfs to unmap rbd devices - shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major" + - name: Use sysfs to unmap rbd devices + ansible.builtin.shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major" changed_when: false with_items: "{{ rbd_mapped_ids.files }}" - - name: unload ceph kernel modules - modprobe: + - name: Unload ceph kernel modules + community.general.modprobe: name: "{{ item }}" state: absent with_items: @@ -108,22 +112,23 @@ - libceph -- name: purge ceph nfs cluster +- name: Purge ceph nfs cluster hosts: nfss gather_facts: false # Already gathered previously become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: stop ceph nfss with systemd - service: + - name: Stop ceph nfss with systemd + ansible.builtin.service: name: "{{ 'ceph-nfs@' + ansible_facts['hostname'] if containerized_deployment | bool else 'nfs-ganesha' }}" state: stopped failed_when: false - - name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}" - file: + - name: Remove ceph nfs directories for "{{ ansible_facts['hostname'] }}" + ansible.builtin.file: path: "{{ item }}" state: absent with_items: 
@@ -133,7 +138,7 @@ - /etc/systemd/system/ceph-nfs@.service -- name: purge node-exporter +- name: Purge node-exporter hosts: - mons - osds @@ -147,35 +152,39 @@ - iscsigws become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - block: - - import_role: + - name: Dashboard related tasks + when: dashboard_enabled | bool + block: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: disable node_exporter service - service: + - name: Disable node_exporter service + ansible.builtin.service: name: node_exporter state: stopped - enabled: no + enabled: false failed_when: false - - name: remove node_exporter service file - file: + - name: Remove node_exporter service file + ansible.builtin.file: name: /etc/systemd/system/node_exporter.service state: absent - - name: remove node-exporter image - command: "{{ container_binary }} rmi {{ node_exporter_container_image }}" + - name: Remove node-exporter image + ansible.builtin.command: "{{ container_binary }} rmi {{ node_exporter_container_image }}" failed_when: false + changed_when: false tags: - remove_img - when: dashboard_enabled | bool -- name: purge ceph monitoring +- name: Purge ceph monitoring hosts: monitoring become: true vars: @@ -184,41 +193,46 @@ - prometheus - alertmanager tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - block: - - import_role: + - name: Dashboard related tasks + when: dashboard_enabled | bool + block: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: stop services - service: + - name: Stop services + ansible.builtin.service: name: "{{ item }}" state: stopped - enabled: no + enabled: false with_items: "{{ grafana_services }}" failed_when: false - - name: remove service files - file: + - name: Remove service files + ansible.builtin.file: name: "/etc/systemd/system/{{ item }}.service" state: absent with_items: "{{ grafana_services }}" failed_when: false - - name: remove ceph dashboard container images - command: "{{ container_binary }} rmi {{ item }}" + - name: Remove ceph dashboard container images + ansible.builtin.command: "{{ container_binary }} rmi {{ item }}" with_items: - "{{ prometheus_container_image }}" - "{{ grafana_container_image }}" - "{{ alertmanager_container_image }}" failed_when: false + changed_when: false tags: - remove_img - - name: remove data - file: + - name: Remove data + ansible.builtin.file: name: "{{ item }}" state: absent with_items: @@ -231,23 +245,22 @@ - /var/lib/prometheus - /etc/prometheus failed_when: false - when: dashboard_enabled | bool -- name: purge ceph mds cluster +- name: Purge ceph mds cluster hosts: mdss gather_facts: false # Already gathered previously become: true tasks: - - name: stop ceph mdss with systemd - service: + - name: Stop ceph mdss with systemd + ansible.builtin.service: name: ceph-mds@{{ ansible_facts['hostname'] }} state: stopped - enabled: no + enabled: false failed_when: false - - name: remove ceph mds service - file: + - name: Remove ceph mds service + ansible.builtin.file: path: /etc/systemd/system/ceph-mds{{ item }} state: absent loop: @@ -255,62 +268,64 @@ - '.target' -- name: purge ceph mgr cluster +- name: Purge ceph mgr cluster hosts: mgrs gather_facts: false # Already gathered previously become: true tasks: - - name: stop ceph mgrs with systemd - service: + - name: Stop ceph mgrs with 
systemd + ansible.builtin.service: name: ceph-mgr@{{ ansible_facts['hostname'] }} state: stopped - enabled: no + enabled: false failed_when: false when: ansible_facts['service_mgr'] == 'systemd' - - name: remove ceph mgr service - file: + - name: Remove ceph mgr service + ansible.builtin.file: path: /etc/systemd/system/ceph-mgr{{ item }} state: absent loop: - '@.service' - '.target' -- name: purge rgwloadbalancer cluster +- name: Purge rgwloadbalancer cluster hosts: rgwloadbalancers gather_facts: false # Already gathered previously become: true tasks: - - name: stop rgwloadbalancer services - service: + - name: Stop rgwloadbalancer services + ansible.builtin.service: name: ['keepalived', 'haproxy'] state: stopped - enabled: no + enabled: false failed_when: false -- name: purge ceph rgw cluster +- name: Purge ceph rgw cluster hosts: rgws gather_facts: false # Already gathered previously become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: set_radosgw_address - - name: stop ceph rgws with systemd - service: + - name: Stop ceph rgws with systemd + ansible.builtin.service: name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" state: stopped - enabled: no + enabled: false failed_when: false with_items: "{{ rgw_instances }}" - - name: remove ceph rgw service - file: + - name: Remove ceph rgw service + ansible.builtin.file: path: /etc/systemd/system/ceph-radosgw{{ item }} state: absent loop: @@ -318,20 +333,20 @@ - '.target' -- name: purge ceph rbd-mirror cluster +- name: Purge ceph rbd-mirror cluster hosts: rbdmirrors gather_facts: false # Already gathered previously become: true tasks: - - name: stop ceph rbd mirror with systemd - service: + - name: Stop ceph rbd mirror with systemd + ansible.builtin.service: name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" state: stopped - enabled: no + enabled: false failed_when: false - - name: remove ceph rbd-mirror service - file: + - name: Remove ceph rbd-mirror service + ansible.builtin.file: path: /etc/systemd/system/ceph-rbd-mirror{{ item }} state: absent loop: @@ -339,22 +354,23 @@ - '.target' -- name: purge ceph osd cluster +- name: Purge ceph osd cluster vars: - reboot_osd_node: False + reboot_osd_node: false hosts: osds gather_facts: false # Already gathered previously become: true handlers: - - name: restart machine - shell: sleep 2 && shutdown -r now "Ansible updates triggered" + - name: Restart machine # noqa: ignore-errors + ansible.builtin.shell: sleep 2 && shutdown -r now "Ansible updates triggered" async: 1 poll: 0 ignore_errors: true + changed_when: false - - name: wait for server to boot + - name: Wait for server to boot become: false - wait_for: + ansible.builtin.wait_for: port: 22 host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}" state: started @@ -362,35 +378,38 @@ timeout: 500 delegate_to: localhost - - name: remove data - shell: rm -rf /var/lib/ceph/* # noqa 302 + - name: Remove data + ansible.builtin.shell: rm -rf /var/lib/ceph/* # noqa no-free-form + changed_when: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: default lvm_volumes if not defined - set_fact: + - name: 
Default lvm_volumes if not defined + ansible.builtin.set_fact: lvm_volumes: [] when: lvm_volumes is not defined - - name: get osd numbers - shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa 306 + - name: Get osd numbers + ansible.builtin.shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa risky-shell-pipe register: osd_ids changed_when: false - - name: stop ceph-osd - service: + - name: Stop ceph-osd + ansible.builtin.service: name: ceph-osd@{{ item }} state: stopped - enabled: no + enabled: false with_items: "{{ osd_ids.stdout_lines }}" - - name: remove ceph udev rules - file: + - name: Remove ceph udev rules + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -399,28 +418,28 @@ when: not containerized_deployment | bool # NOTE(leseb): hope someone will find a more elegant way one day... - - name: see if encrypted partitions are present - shell: blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2 # noqa 306 + - name: See if encrypted partitions are present + ansible.builtin.shell: blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2 # noqa risky-shell-pipe register: encrypted_ceph_partuuid changed_when: false - - name: get osd data and lockbox mount points - shell: (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }' # noqa 306 + - name: Get osd data and lockbox mount points + ansible.builtin.shell: (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }' # noqa risky-shell-pipe register: mounted_osd changed_when: false - - name: drop all cache - shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches" + - name: Drop all cache + ansible.builtin.shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches" changed_when: false - - name: see if ceph-volume is installed # noqa : 305 - shell: command -v ceph-volume + - name: See if ceph-volume is installed # noqa command-instead-of-shell + ansible.builtin.shell: command -v ceph-volume changed_when: false failed_when: false register: ceph_volume_present when: not containerized_deployment | bool - - name: zap and destroy osds by osd ids + - name: Zap and destroy osds by osd ids ceph_volume: osd_id: "{{ item | int }}" action: "zap" @@ -433,93 +452,97 @@ - osd_auto_discovery | default(False) | bool - (containerized_deployment | bool or ceph_volume_present.rc == 0) - - name: umount osd data partition + - name: Umount osd data partition ansible.posix.mount: path: "{{ item }}" state: unmounted with_items: "{{ mounted_osd.stdout_lines }}" - - name: remove osd mountpoint tree - file: + - name: Remove osd mountpoint tree + ansible.builtin.file: path: /var/lib/ceph/osd/ state: absent register: remove_osd_mountpoints ignore_errors: true - - name: is reboot needed - command: echo requesting reboot + - name: Is reboot needed + ansible.builtin.command: echo requesting reboot delegate_to: localhost become: false notify: - - restart machine - - wait for server to boot - - remove data + - Restart machine + - Wait for server to boot + - Remove data + changed_when: false when: - reboot_osd_node | bool - remove_osd_mountpoints.failed is defined - - name: wipe table on dm-crypt devices - command: dmsetup wipe_table --force "{{ item }}" + - name: Wipe table on dm-crypt devices + ansible.builtin.command: dmsetup wipe_table --force "{{ item }}" with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}" + changed_when: false when: 
encrypted_ceph_partuuid.stdout_lines | length > 0 - - name: delete dm-crypt devices if any - command: dmsetup remove --retry --force {{ item }} + - name: Delete dm-crypt devices if any + ansible.builtin.command: dmsetup remove --retry --force {{ item }} with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}" + changed_when: false when: encrypted_ceph_partuuid.stdout_lines | length > 0 - - name: get payload_offset - shell: cryptsetup luksDump /dev/disk/by-partuuid/{{ item }} | awk '/Payload offset:/ { print $3 }' # noqa 306 + - name: Get payload_offset + ansible.builtin.shell: cryptsetup luksDump /dev/disk/by-partuuid/{{ item }} | awk '/Payload offset:/ { print $3 }' # noqa risky-shell-pipe register: payload_offset with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}" + changed_when: false when: encrypted_ceph_partuuid.stdout_lines | length > 0 - - name: get physical sector size - command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }} + - name: Get physical sector size + ansible.builtin.command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }} changed_when: false with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}" when: encrypted_ceph_partuuid.stdout_lines | length > 0 register: phys_sector_size - - name: wipe dmcrypt device - command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct + - name: Wipe dmcrypt device + ansible.builtin.command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct changed_when: false with_together: - "{{ encrypted_ceph_partuuid.stdout_lines }}" - "{{ payload_offset.results }}" - "{{ phys_sector_size.results }}" - - name: get ceph data partitions - shell: | + - name: Get ceph data partitions + ansible.builtin.shell: | blkid -o device -t PARTLABEL="ceph data" changed_when: false failed_when: false register: ceph_data_partition_to_erase_path - - name: get ceph lockbox partitions - shell: | + - name: Get ceph lockbox partitions + ansible.builtin.shell: | blkid -o device -t PARTLABEL="ceph lockbox" changed_when: false failed_when: false register: ceph_lockbox_partition_to_erase_path - - name: see if ceph-volume is installed # noqa : 305 - shell: command -v ceph-volume + - name: See if ceph-volume is installed # noqa: command-instead-of-shell + ansible.builtin.shell: command -v ceph-volume changed_when: false failed_when: false register: ceph_volume_present when: not containerized_deployment | bool - - name: zap and destroy osds created by ceph-volume with lvm_volumes + - name: Zap and destroy osds created by ceph-volume with lvm_volumes ceph_volume: data: "{{ item.data }}" - data_vg: "{{ item.data_vg|default(omit) }}" - journal: "{{ item.journal|default(omit) }}" - journal_vg: "{{ item.journal_vg|default(omit) }}" - db: "{{ item.db|default(omit) }}" - db_vg: "{{ item.db_vg|default(omit) }}" - wal: "{{ item.wal|default(omit) }}" - wal_vg: "{{ item.wal_vg|default(omit) }}" + data_vg: "{{ item.data_vg | default(omit) }}" + journal: "{{ item.journal | default(omit) }}" + journal_vg: "{{ item.journal_vg | default(omit) }}" + db: "{{ item.db | default(omit) }}" + db_vg: "{{ item.db_vg | default(omit) }}" + wal: "{{ item.wal | default(omit) }}" + wal_vg: "{{ item.wal_vg | default(omit) }}" action: "zap" environment: CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" @@ -530,7 +553,7 @@ - containerized_deployment | bool or ceph_volume_present.rc == 0 - - name: zap and destroy osds created by ceph-volume with devices + - name: Zap and 
destroy osds created by ceph-volume with devices ceph_volume: data: "{{ item }}" action: "zap" @@ -546,36 +569,36 @@ - containerized_deployment | bool or ceph_volume_present.rc == 0 - - name: get ceph block partitions - shell: | + - name: Get ceph block partitions + ansible.builtin.shell: | blkid -o device -t PARTLABEL="ceph block" changed_when: false failed_when: false register: ceph_block_partition_to_erase_path - - name: get ceph journal partitions - shell: | + - name: Get ceph journal partitions + ansible.builtin.shell: | blkid -o device -t PARTLABEL="ceph journal" changed_when: false failed_when: false register: ceph_journal_partition_to_erase_path - - name: get ceph db partitions - shell: | + - name: Get ceph db partitions + ansible.builtin.shell: | blkid -o device -t PARTLABEL="ceph block.db" changed_when: false failed_when: false register: ceph_db_partition_to_erase_path - - name: get ceph wal partitions - shell: | + - name: Get ceph wal partitions + ansible.builtin.shell: | blkid -o device -t PARTLABEL="ceph block.wal" changed_when: false failed_when: false register: ceph_wal_partition_to_erase_path - - name: set_fact combined_devices_list - set_fact: + - name: Set_fact combined_devices_list + ansible.builtin.set_fact: combined_devices_list: "{{ ceph_data_partition_to_erase_path.stdout_lines + ceph_lockbox_partition_to_erase_path.stdout_lines + ceph_block_partition_to_erase_path.stdout_lines + @@ -583,37 +606,37 @@ ceph_db_partition_to_erase_path.stdout_lines + ceph_wal_partition_to_erase_path.stdout_lines }}" - - name: resolve parent device - command: lsblk --nodeps -no pkname "{{ item }}" + - name: Resolve parent device + ansible.builtin.command: lsblk --nodeps -no pkname "{{ item }}" register: tmp_resolved_parent_device changed_when: false with_items: "{{ combined_devices_list }}" - - name: set_fact resolved_parent_device - set_fact: + - name: Set_fact resolved_parent_device + ansible.builtin.set_fact: resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}" - - name: wipe partitions - shell: | + - name: Wipe partitions + ansible.builtin.shell: | wipefs --all "{{ item }}" dd if=/dev/zero of="{{ item }}" bs=1 count=4096 changed_when: false with_items: "{{ combined_devices_list }}" - - name: check parent device partition - parted: + - name: Check parent device partition + community.general.parted: device: "/dev/{{ item }}" loop: "{{ resolved_parent_device }}" register: parted_info - - name: fail if there is a boot partition on the device - fail: + - name: Fail if there is a boot partition on the device + ansible.builtin.fail: msg: "{{ item.item }} has a boot partition" loop: "{{ parted_info.results }}" when: "'boot' in (item.partitions | map(attribute='flags') | list | flatten)" - - name: zap ceph journal/block db/block wal partitions # noqa 306 - shell: | + - name: Zap ceph journal/block db/block wal partitions # noqa risky-shell-pipe + ansible.builtin.shell: | sgdisk -Z --clear --mbrtogpt -g -- /dev/"{{ item }}" dd if=/dev/zero of=/dev/"{{ item }}" bs=1M count=200 parted -s /dev/"{{ item }}" mklabel gpt @@ -622,31 +645,31 @@ with_items: "{{ resolved_parent_device }}" changed_when: false - - name: remove ceph osd service - file: + - name: Remove ceph osd service + ansible.builtin.file: path: /etc/systemd/system/ceph-osd{{ item }} state: absent loop: - '@.service' - '.target' -- name: purge ceph mon cluster +- name: Purge ceph mon cluster hosts: mons gather_facts: false # already gathered previously become: true tasks: - - name: 
stop ceph mons with systemd - service: + - name: Stop ceph mons with systemd + ansible.builtin.service: name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}" state: stopped - enabled: no + enabled: false failed_when: false with_items: - mon - mgr - - name: remove monitor store and bootstrap keys - file: + - name: Remove monitor store and bootstrap keys + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -658,14 +681,14 @@ - /var/lib/ceph/bootstrap-mgr - /var/lib/ceph/tmp - - name: remove ceph mon and mgr service - file: + - name: Remove ceph mon and mgr service + ansible.builtin.file: path: "/etc/systemd/system/ceph-{{ item.0 }}{{ item.1 }}" state: absent loop: "{{ ['mon', 'mgr'] | product(['@.service', '.target']) | list }}" -- name: purge ceph-crash daemons +- name: Purge ceph-crash daemons hosts: - mons - osds @@ -676,34 +699,35 @@ gather_facts: false become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: stop ceph-crash service - service: + - name: Stop ceph-crash service + ansible.builtin.service: name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" state: stopped - enabled: no + enabled: false failed_when: false - - name: systemctl reset-failed ceph-crash # noqa 303 - command: "systemctl reset-failed {{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" + - name: Systemctl reset-failed ceph-crash # noqa command-instead-of-module + ansible.builtin.command: "systemctl reset-failed {{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" changed_when: false failed_when: false - - name: remove service file - file: + - name: Remove service file + ansible.builtin.file: name: "/etc/systemd/system/ceph-crash{{ '@' if containerized_deployment | bool else '' }}.service" state: absent failed_when: false - - name: remove /var/lib/ceph/crash - file: + - name: Remove /var/lib/ceph/crash + ansible.builtin.file: path: /var/lib/ceph/crash state: absent -- name: check container hosts +- name: Check container hosts hosts: - mons - osds @@ -714,40 +738,42 @@ - mgrs become: true tasks: - - name: containerized_deployment only + - name: Containerized_deployment only when: containerized_deployment | bool block: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: remove stopped/exited containers - command: > + - name: Remove stopped/exited containers + ansible.builtin.command: > {{ container_binary }} container prune -f changed_when: false - - name: show container list on all the nodes (should be empty) - command: > + - name: Show container list on all the nodes (should be empty) + ansible.builtin.command: > {{ container_binary }} ps --filter='name=ceph' -a -q register: containers_list changed_when: false - - name: show container images on all the nodes (should be empty if tags was passed remove_img) - command: > + - name: Show container images on all the nodes (should be empty if tags was passed remove_img) + ansible.builtin.command: > {{ container_binary }} images register: images_list changed_when: false - - name: fail if container are still present - fail: + - name: Fail if container are still present + ansible.builtin.fail: msg: "It looks like 
container are still present." - when: containers_list.stdout_lines|length > 0 + when: containers_list.stdout_lines | length > 0 -- name: final cleanup - check any running ceph, purge ceph packages, purge config and remove data +- name: Final cleanup - check any running ceph, purge ceph packages, purge config and remove data vars: # When set to true both groups of packages are purged. # This can cause problem with qemu-kvm @@ -794,98 +820,100 @@ gather_facts: false # Already gathered previously become: true handlers: - - name: get osd data and lockbox mount points - shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'" + - name: Get osd data and lockbox mount points + ansible.builtin.shell: "set -o pipefail && (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'" register: mounted_osd changed_when: false - listen: "remove data" + listen: "Remove data" - - name: umount osd data partition + - name: Umount osd data partition ansible.posix.mount: path: "{{ item }}" state: unmounted with_items: "{{ mounted_osd.stdout_lines }}" - listen: "remove data" + listen: "Remove data" - - name: remove data - shell: rm -rf /var/lib/ceph/* # noqa 302 - listen: "remove data" + - name: Remove data + ansible.builtin.shell: rm -rf /var/lib/ceph/* # noqa no-free-form + changed_when: false + listen: "Remove data" tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: non containerized related tasks + - name: Non containerized related tasks when: not containerized_deployment | bool block: - - name: purge ceph packages with yum - yum: + - name: Purge ceph packages with yum + ansible.builtin.yum: name: "{{ ceph_packages }}" state: absent when: ansible_facts['pkg_mgr'] == 'yum' - - name: purge ceph packages with dnf - dnf: + - name: Purge ceph packages with dnf + ansible.builtin.dnf: name: "{{ ceph_packages }}" state: absent when: ansible_facts['pkg_mgr'] == 'dnf' - - name: purge ceph packages with apt - apt: + - name: Purge ceph packages with apt + ansible.builtin.apt: name: "{{ ceph_packages }}" state: absent purge: true when: ansible_facts['pkg_mgr'] == 'apt' - - name: purge remaining ceph packages with yum - yum: + - name: Purge remaining ceph packages with yum + ansible.builtin.yum: name: "{{ ceph_remaining_packages }}" state: absent when: - ansible_facts['pkg_mgr'] == 'yum' - purge_all_packages | bool - - name: purge remaining ceph packages with dnf - dnf: + - name: Purge remaining ceph packages with dnf + ansible.builtin.dnf: name: "{{ ceph_remaining_packages }}" state: absent when: - ansible_facts['pkg_mgr'] == 'dnf' - purge_all_packages | bool - - name: purge remaining ceph packages with apt - apt: + - name: Purge remaining ceph packages with apt + ansible.builtin.apt: name: "{{ ceph_remaining_packages }}" state: absent when: - ansible_facts['pkg_mgr'] == 'apt' - purge_all_packages | bool - - name: purge extra packages with yum - yum: + - name: Purge extra packages with yum + ansible.builtin.yum: name: "{{ extra_packages }}" state: absent when: - ansible_facts['pkg_mgr'] == 'yum' - purge_all_packages | bool - - name: purge extra packages with dnf - dnf: + - name: Purge extra packages with dnf + ansible.builtin.dnf: name: "{{ extra_packages }}" state: absent when: - ansible_facts['pkg_mgr'] == 'dnf' - purge_all_packages | bool - - name: purge extra packages with apt - apt: + - name: Purge extra packages with apt + ansible.builtin.apt: name: "{{ extra_packages }}" state: absent when: - 
ansible_facts['pkg_mgr'] == 'apt' - purge_all_packages | bool - - name: remove config and any ceph socket left - file: + - name: Remove config and any ceph socket left + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -894,27 +922,30 @@ - /etc/haproxy - /run/ceph - - name: remove logs - file: - path: /var/log/ceph - state: absent + - name: Remove logs + ansible.builtin.file: + path: /var/log/ceph + state: absent - - name: request data removal - command: echo requesting data removal # noqa 301 + - name: Request data removal + ansible.builtin.command: echo requesting data removal # noqa no-changed-when become: false delegate_to: localhost - notify: remove data + notify: Remove data + changed_when: false - - name: purge dnf cache - command: dnf clean all + - name: Purge dnf cache + ansible.builtin.command: dnf clean all + changed_when: false when: ansible_facts['pkg_mgr'] == 'dnf' - - name: clean apt - command: apt-get clean # noqa 303 + - name: Clean apt + ansible.builtin.command: apt-get clean # noqa command-instead-of-module + changed_when: false when: ansible_facts['pkg_mgr'] == 'apt' - - name: purge ceph repo file in /etc/yum.repos.d - file: + - name: Purge ceph repo file in /etc/yum.repos.d + ansible.builtin.file: path: '/etc/yum.repos.d/{{ item }}.repo' state: absent with_items: @@ -922,44 +953,46 @@ - ceph_stable when: ansible_facts['os_family'] == 'RedHat' - - name: check for anything running ceph - command: "ps -u ceph -U ceph" + - name: Check for anything running ceph + ansible.builtin.command: "ps -u ceph -U ceph" register: check_for_running_ceph changed_when: false failed_when: check_for_running_ceph.rc == 0 - - name: find ceph systemd unit files to remove - find: + - name: Find ceph systemd unit files to remove + ansible.builtin.find: paths: "/etc/systemd/system" pattern: "ceph*" recurse: true file_type: any register: systemd_files - - name: remove ceph systemd unit files - file: + - name: Remove ceph systemd unit files + ansible.builtin.file: path: "{{ item.path }}" state: absent with_items: "{{ systemd_files.files }}" when: ansible_facts['service_mgr'] == 'systemd' - - name: containerized related tasks + - name: Containerized related tasks when: containerized_deployment | bool block: - - name: check if it is Atomic host - stat: path=/run/ostree-booted + - name: Check if it is Atomic host + ansible.builtin.stat: + path: /run/ostree-booted register: stat_ostree - - name: set fact for using Atomic host - set_fact: + - name: Set fact for using Atomic host + ansible.builtin.set_fact: is_atomic: "{{ stat_ostree.stat.exists }}" - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: remove ceph container image - command: "{{ container_binary }} rmi {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" + - name: Remove ceph container image + ansible.builtin.command: "{{ container_binary }} rmi {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" changed_when: false when: - inventory_hostname not in groups.get(client_group_name, []) @@ -967,11 +1000,11 @@ tags: - remove_img - - name: stop docker service - service: + - name: Stop docker service # noqa: ignore-errors + ansible.builtin.service: name: docker state: stopped - enabled: no + enabled: false when: - not is_atomic - container_binary == 'docker' @@ -979,55 +1012,57 @@ tags: - remove_docker - - name: remove docker on debian/ubuntu - apt: + - name: Remove docker on debian/ubuntu 
+ ansible.builtin.apt: name: ['docker-ce', 'docker-engine', 'docker.io', 'python-docker', 'python3-docker'] state: absent - update_cache: yes - autoremove: yes + update_cache: true + autoremove: true when: ansible_facts['os_family'] == 'Debian' tags: - remove_docker - - name: red hat based systems tasks + - name: Red hat based systems tasks + when: + ansible_facts['os_family'] == 'RedHat' and + not is_atomic + tags: + - remove_docker block: - - name: yum related tasks on red hat + - name: Yum related tasks on red hat + when: ansible_facts['pkg_mgr'] == "yum" block: - - name: remove packages on redhat - yum: + - name: Remove packages on redhat + ansible.builtin.yum: name: ['epel-release', 'docker', 'python-docker-py'] state: absent - - name: remove package dependencies on redhat - command: yum -y autoremove + - name: Remove package dependencies on redhat + ansible.builtin.command: yum -y autoremove # noqa: command-instead-of-module + changed_when: false - - name: remove package dependencies on redhat again - command: yum -y autoremove - when: - ansible_facts['pkg_mgr'] == "yum" + - name: Remove package dependencies on redhat again + ansible.builtin.command: yum -y autoremove # noqa: command-instead-of-module + changed_when: false - - name: dnf related tasks on red hat + - name: Dnf related tasks on red hat + when: ansible_facts['pkg_mgr'] == "dnf" block: - - name: remove docker on redhat - dnf: + - name: Remove docker on redhat + ansible.builtin.dnf: name: ['docker', 'python3-docker'] state: absent - - name: remove package dependencies on redhat - command: dnf -y autoremove + - name: Remove package dependencies on redhat + ansible.builtin.command: dnf -y autoremove + changed_when: false - - name: remove package dependencies on redhat again - command: dnf -y autoremove - when: - ansible_facts['pkg_mgr'] == "dnf" - when: - ansible_facts['os_family'] == 'RedHat' and - not is_atomic - tags: - - remove_docker + - name: Remove package dependencies on redhat again + ansible.builtin.command: dnf -y autoremove + changed_when: false - - name: find any service-cid file left - find: + - name: Find any service-cid file left + ansible.builtin.find: paths: /run patterns: - "ceph-*.service-cid" @@ -1040,14 +1075,14 @@ - "alertmanager.service-cid" register: service_cid_files - - name: rm any service-cid file - file: + - name: Rm any service-cid file + ansible.builtin.file: path: "{{ item.path }}" state: absent with_items: "{{ service_cid_files.files }}" -- name: purge ceph directories +- name: Purge ceph directories hosts: - mons - osds @@ -1060,14 +1095,15 @@ gather_facts: false # Already gathered previously become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: purge ceph directories - containerized deployments + - name: Purge ceph directories - containerized deployments when: containerized_deployment | bool block: - - name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket - file: + - name: Purge ceph directories and ceph socket + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -1076,34 +1112,35 @@ - /run/ceph - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh" - - name: remove ceph data - shell: rm -rf /var/lib/ceph/* # noqa 302 + - name: Remove ceph data + ansible.builtin.shell: rm -rf /var/lib/ceph/* # noqa: no-free-form changed_when: false - - name: remove /var/lib/ceph - file: + - name: Remove /var/lib/ceph + ansible.builtin.file: path: 
/var/lib/ceph state: absent # (todo): remove this when we are able to manage docker # service on atomic host. - - name: remove docker data - shell: rm -rf /var/lib/docker/* # noqa 302 + - name: Remove docker data + ansible.builtin.shell: rm -rf /var/lib/docker/* # noqa: no-free-form + changed_when: false when: not is_atomic | bool tags: - remove_docker -- name: purge fetch directory +- name: Purge fetch directory hosts: localhost gather_facts: false tasks: - - name: set fetch_directory value if not set - set_fact: + - name: Set fetch_directory value if not set + ansible.builtin.set_fact: fetch_directory: "fetch/" when: fetch_directory is not defined - - name: purge fetch directory for localhost - file: + - name: Purge fetch directory for localhost + ansible.builtin.file: path: "{{ fetch_directory | default('fetch/') }}" state: absent diff --git a/infrastructure-playbooks/purge-dashboard.yml b/infrastructure-playbooks/purge-dashboard.yml index b4a38bef22..e0f0228447 100644 --- a/infrastructure-playbooks/purge-dashboard.yml +++ b/infrastructure-playbooks/purge-dashboard.yml @@ -13,17 +13,17 @@ # Overrides the prompt using -e option. Can be used in # automation scripts to avoid interactive prompt. -- name: confirm whether user really meant to purge the dashboard +- name: Confirm whether user really meant to purge the dashboard hosts: localhost gather_facts: false vars_prompt: - - name: ireallymeanit + - name: Ireallymeanit prompt: Are you sure you want to purge the dashboard? default: 'no' - private: no + private: false tasks: - - name: exit playbook, if user did not mean to purge dashboard - fail: + - name: Exit playbook, if user did not mean to purge dashboard + ansible.builtin.fail: msg: > "Exiting purge-dashboard playbook, dashboard was NOT purged. 
To purge the dashboard, either say 'yes' on the prompt or @@ -31,18 +31,18 @@ invoking the playbook" when: ireallymeanit != 'yes' - - name: import_role ceph-defaults - import_role: + - name: Import_role ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - name: check if a legacy grafana-server group exists - import_role: + - name: Check if a legacy grafana-server group exists + ansible.builtin.import_role: name: ceph-facts tasks_from: convert_grafana_server_group_name.yml when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0 -- name: gather facts on all hosts +- name: Gather facts on all hosts hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" @@ -55,9 +55,11 @@ - "{{ monitoring_group_name | default('monitoring') }}" become: true tasks: - - debug: msg="gather facts on all Ceph hosts for following reference" + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" -- name: purge node exporter +- name: Purge node exporter hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" @@ -71,58 +73,62 @@ gather_facts: false become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: disable node_exporter service - service: + - name: Disable node_exporter service + ansible.builtin.service: name: node_exporter state: stopped - enabled: no + enabled: false failed_when: false - - name: remove node_exporter service files - file: + - name: Remove node_exporter service files + ansible.builtin.file: name: "{{ item }}" state: absent loop: - /etc/systemd/system/node_exporter.service - /run/node_exporter.service-cid - - name: remove node-exporter image - command: "{{ container_binary }} rmi {{ node_exporter_container_image }}" + - name: Remove node-exporter image + ansible.builtin.command: "{{ container_binary }} rmi {{ node_exporter_container_image }}" changed_when: false failed_when: false -- name: purge ceph monitoring +- name: Purge ceph monitoring hosts: "{{ monitoring_group_name | default('monitoring') }}" gather_facts: false become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: stop services - service: + - name: Stop services + ansible.builtin.service: name: "{{ item }}" state: stopped - enabled: no + enabled: false failed_when: false loop: - alertmanager - prometheus - grafana-server - - name: remove systemd service files - file: + - name: Remove systemd service files + ansible.builtin.file: name: "{{ item }}" state: absent loop: @@ -133,8 +139,8 @@ - /run/prometheus.service-cid - /run/grafana-server.service-cid - - name: remove ceph dashboard container images - command: "{{ container_binary }} rmi {{ item }}" + - name: Remove ceph dashboard container images + ansible.builtin.command: "{{ container_binary }} rmi {{ item }}" loop: - "{{ alertmanager_container_image }}" - "{{ prometheus_container_image }}" @@ -142,16 +148,16 @@ changed_when: false failed_when: false - - name: remove ceph-grafana-dashboards package on RedHat or SUSE - package: + - name: Remove ceph-grafana-dashboards 
package on RedHat or SUSE + ansible.builtin.package: name: ceph-grafana-dashboards state: absent when: - not containerized_deployment | bool - ansible_facts['os_family'] in ['RedHat', 'Suse'] - - name: remove data - file: + - name: Remove data + ansible.builtin.file: name: "{{ item }}" state: absent loop: @@ -162,7 +168,7 @@ - "{{ prometheus_data_dir }}" - /var/lib/grafana -- name: purge ceph dashboard +- name: Purge ceph dashboard hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) | default(omit) }}" gather_facts: false become: true @@ -170,14 +176,16 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: remove the dashboard admin user + - name: Remove the dashboard admin user ceph_dashboard_user: name: "{{ dashboard_admin_user }}" cluster: "{{ cluster }}" @@ -185,7 +193,7 @@ run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" - - name: remove radosgw system user + - name: Remove radosgw system user radosgw_user: name: "{{ dashboard_rgw_api_user_id }}" cluster: "{{ cluster }}" @@ -194,7 +202,7 @@ delegate_to: "{{ groups[mon_group_name][0] }}" when: groups.get(rgw_group_name, []) | length > 0 - - name: disable mgr dashboard and prometheus modules + - name: Disable mgr dashboard and prometheus modules ceph_mgr_module: name: "{{ item }}" cluster: "{{ cluster }}" @@ -205,8 +213,8 @@ - dashboard - prometheus - - name: remove TLS certificate and key files - file: + - name: Remove TLS certificate and key files + ansible.builtin.file: name: "/etc/ceph/ceph-dashboard.{{ item }}" state: absent loop: @@ -214,8 +222,8 @@ - key when: dashboard_protocol == "https" - - name: remove ceph-mgr-dashboard package - package: + - name: Remove ceph-mgr-dashboard package + ansible.builtin.package: name: ceph-mgr-dashboard state: absent when: not containerized_deployment | bool diff --git a/infrastructure-playbooks/purge-iscsi-gateways.yml b/infrastructure-playbooks/purge-iscsi-gateways.yml index ec2247d2f4..6a5f453c9c 100644 --- a/infrastructure-playbooks/purge-iscsi-gateways.yml +++ b/infrastructure-playbooks/purge-iscsi-gateways.yml @@ -1,96 +1,97 @@ --- - - name: Confirm removal of the iSCSI gateway configuration hosts: localhost vars_prompt: - - name: purge_config + - name: Purge_config prompt: Which configuration elements should be purged? 
(all, lio or abort) default: 'abort' - private: no + private: false tasks: - name: Exit playbook if user aborted the purge - fail: + ansible.builtin.fail: msg: > "You have aborted the purge of the iSCSI gateway configuration" when: purge_config == 'abort' - - name: set_fact igw_purge_type - set_fact: + - name: Set_fact igw_purge_type + ansible.builtin.set_fact: igw_purge_type: "{{ purge_config }}" -- name: stopping the gateways +- name: Stopping the gateways hosts: - iscsigws - become: yes - vars: - - igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}" - + become: true tasks: - - name: stopping and disabling iscsi daemons - service: + - name: Stopping and disabling iscsi daemons + ansible.builtin.service: name: "{{ item }}" state: stopped - enabled: no + enabled: false with_items: - rbd-target-gw - rbd-target-api - tcmu-runner -- name: removing the gateway configuration +- name: Removing the gateway configuration hosts: - iscsigws - become: yes + become: true vars: - - igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}" + igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}" tasks: - - name: igw_purge | deleting configured rbd devices - igw_purge: mode="disks" + - name: Igw_purge | deleting configured rbd devices + igw_purge: + mode: "disks" when: igw_purge_type == 'all' run_once: true - - name: igw_purge | purging the gateway configuration - igw_purge: mode="gateway" + - name: Igw_purge | purging the gateway configuration + igw_purge: + mode: "gateway" run_once: true - - name: restart and enable iscsi daemons + - name: Restart and enable iscsi daemons when: igw_purge_type == 'lio' - service: + ansible.builtin.service: name: "{{ item }}" state: started - enabled: yes + enabled: true with_items: - tcmu-runner - rbd-target-api - rbd-target-gw -- name: remove the gateways from the ceph dashboard +- name: Remove the gateways from the ceph dashboard hosts: mons become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: iscsi gateways with ceph dashboard + - name: Iscsi gateways with ceph dashboard when: dashboard_enabled | bool run_once: true block: - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: set_fact container_exec_cmd - set_fact: + - name: Set_fact container_exec_cmd + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" when: containerized_deployment | bool - - name: get iscsi gateway list - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json" + - name: Get iscsi gateway list + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json" changed_when: false register: gateways - - name: remove iscsi gateways - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}" + - name: Remove iscsi gateways + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}" with_items: '{{ (gateways.stdout | from_json)["gateways"] }}' + changed_when: false diff --git a/infrastructure-playbooks/rgw-add-users-buckets.yml b/infrastructure-playbooks/rgw-add-users-buckets.yml index b5b893774a..da2927a7bb 100644 --- a/infrastructure-playbooks/rgw-add-users-buckets.yml +++ 
b/infrastructure-playbooks/rgw-add-users-buckets.yml @@ -12,54 +12,54 @@ # admin_secret_key # # Additionally modify the users list and buckets list to create the -# users and buckets you want +# users and buckets you want # -- name: add rgw users and buckets +- name: Add rgw users and buckets connection: local hosts: localhost - gather_facts: no + gather_facts: false tasks: - - name: add rgw users and buckets - ceph_add_users_buckets: - rgw_host: '172.20.0.2' - port: 8000 - admin_access_key: '8W56BITCSX27CD555Z5B' - admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20' - users: - - username: 'test1' - fullname: 'tester' - email: 'dan1@email.com' - maxbucket: 666 - suspend: false - autogenkey: false - accesskey: 'B3AR4Q33L59YV56A9A2F' - secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76' - userquota: true - usermaxsize: '1000' - usermaxobjects: 3 - bucketquota: true - bucketmaxsize: '1000' - bucketmaxobjects: 3 - - username: 'test2' - fullname: 'tester' - buckets: - - bucket: 'bucket1' - user: 'test2' - - bucket: 'bucket2' - user: 'test1' - - bucket: 'bucket3' - user: 'test1' - - bucket: 'bucket4' - user: 'test1' - - bucket: 'bucket5' - user: 'test1' - - bucket: 'bucket6' - user: 'test2' - - bucket: 'bucket7' - user: 'test2' - - bucket: 'bucket8' - user: 'test2' - - bucket: 'bucket9' - user: 'test2' - - bucket: 'bucket10' - user: 'test2' + - name: Add rgw users and buckets + ceph_add_users_buckets: + rgw_host: '172.20.0.2' + port: 8000 + admin_access_key: '8W56BITCSX27CD555Z5B' + admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20' + users: + - username: 'test1' + fullname: 'tester' + email: 'dan1@email.com' + maxbucket: 666 + suspend: false + autogenkey: false + accesskey: 'B3AR4Q33L59YV56A9A2F' + secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76' + userquota: true + usermaxsize: '1000' + usermaxobjects: 3 + bucketquota: true + bucketmaxsize: '1000' + bucketmaxobjects: 3 + - username: 'test2' + fullname: 'tester' + buckets: + - bucket: 'bucket1' + user: 'test2' + - bucket: 'bucket2' + user: 'test1' + - bucket: 'bucket3' + user: 'test1' + - bucket: 'bucket4' + user: 'test1' + - bucket: 'bucket5' + user: 'test1' + - bucket: 'bucket6' + user: 'test2' + - bucket: 'bucket7' + user: 'test2' + - bucket: 'bucket8' + user: 'test2' + - bucket: 'bucket9' + user: 'test2' + - bucket: 'bucket10' + user: 'test2' diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index 8eef895d82..6daec7d4ff 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -12,23 +12,23 @@ # If you run Red Hat Ceph Storage and are doing a **major** update (e.g: from 2 to 3), you have to change the ceph_rhcs_version to a newer one # -- name: confirm whether user really meant to upgrade the cluster +- name: Confirm whether user really meant to upgrade the cluster hosts: localhost tags: always become: false gather_facts: false vars: - - mgr_group_name: mgrs + mgr_group_name: mgrs vars_prompt: - - name: ireallymeanit + - name: Ireallymeanit prompt: Are you sure you want to upgrade the cluster? default: 'no' - private: no + private: false tasks: - - name: exit playbook, if user did not mean to upgrade cluster - fail: + - name: Exit playbook, if user did not mean to upgrade cluster + ansible.builtin.fail: msg: > "Exiting rolling_update.yml playbook, cluster was NOT upgraded. 
To upgrade the cluster, either say 'yes' on the prompt or @@ -36,18 +36,18 @@ invoking the playbook" when: ireallymeanit != 'yes' - - name: import_role ceph-defaults - import_role: + - name: Import_role ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - name: check if a legacy grafana-server group exists - import_role: + - name: Check if a legacy grafana-server group exists + ansible.builtin.import_role: name: ceph-facts tasks_from: convert_grafana_server_group_name.yml when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0 -- name: gather facts and check the init system +- name: Gather facts and check the init system hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" @@ -60,58 +60,66 @@ - "{{ iscsi_gw_group_name|default('iscsigws') }}" - "{{ monitoring_group_name|default('monitoring') }}" tags: always - any_errors_fatal: True - become: True - gather_facts: False + any_errors_fatal: true + become: true + gather_facts: false vars: - delegate_facts_host: True + delegate_facts_host: true tasks: - - debug: msg="gather facts on all Ceph hosts for following reference" + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: gather facts - setup: + - name: Gather facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, []) - - name: gather and delegate facts - setup: + - name: Gather and delegate facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' delegate_to: "{{ item }}" - delegate_facts: True + delegate_facts: true with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}" run_once: true when: delegate_facts_host | bool - - name: set_fact rolling_update - set_fact: + - name: Set_fact rolling_update + ansible.builtin.set_fact: rolling_update: true - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + - name: Import ceph-infra role + ansible.builtin.import_role: name: ceph-infra tags: ceph_infra - - import_role: + - name: Import ceph-validate role + ansible.builtin.import_role: name: ceph-validate - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine when: - (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first) - (containerized_deployment | bool) or (dashboard_enabled | bool) - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common tasks_from: registry when: @@ -119,90 +127,98 @@ - (containerized_deployment | bool) or (dashboard_enabled | bool) - ceph_docker_registry_auth | bool - - name: check ceph release in container image + - name: Check ceph release in container image when: - groups.get(mon_group_name, []) | length > 0 - containerized_deployment | bool delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true block: - - name: get the ceph release being deployed - command: "{{ ceph_cmd }} --cluster {{ cluster }} --version" + - name: Get the ceph release being deployed + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} --version" register: ceph_version changed_when: false - - name: check ceph release 
being deployed - fail: + - name: Check ceph release being deployed + ansible.builtin.fail: msg: "This version of ceph-ansible is intended for upgrading to Ceph Squid only." when: "'squid' not in ceph_version.stdout.split()" -- name: upgrade ceph mon cluster +- name: Upgrade ceph mon cluster tags: mons vars: health_mon_check_retries: 5 health_mon_check_delay: 15 - upgrade_ceph_packages: True + upgrade_ceph_packages: true hosts: "{{ mon_group_name|default('mons') }}" serial: 1 - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: upgrade ceph mon cluster + + - name: Upgrade ceph mon cluster block: - - name: remove ceph aliases - file: + - name: Remove ceph aliases + ansible.builtin.file: path: /etc/profile.d/ceph-aliases.sh state: absent when: containerized_deployment | bool - - name: set mon_host_count - set_fact: + - name: Set mon_host_count + ansible.builtin.set_fact: mon_host_count: "{{ groups[mon_group_name] | length }}" - - name: fail when less than three monitors - fail: + - name: Fail when less than three monitors + ansible.builtin.fail: msg: "Upgrade of cluster with less than three monitors is not supported." when: mon_host_count | int < 3 - - name: select a running monitor - set_fact: + - name: Select a running monitor + ansible.builtin.set_fact: mon_host: "{{ groups[mon_group_name] | difference([inventory_hostname]) | last }}" - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - block: - - name: get ceph cluster status - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health -f json" + - name: Check Ceph monitors quorum status + when: inventory_hostname == groups[mon_group_name] | first + block: + - name: Get ceph cluster status + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health -f json" register: check_cluster_health delegate_to: "{{ mon_host }}" + changed_when: false - - block: - - name: display ceph health detail - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health detail" + - name: Display health status before failing + when: (check_cluster_health.stdout | from_json).status == 'HEALTH_ERR' + block: + - name: Display ceph health detail + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health detail" delegate_to: "{{ mon_host }}" + changed_when: false - - name: fail if cluster isn't in an acceptable state - fail: + - name: Fail if cluster isn't in an acceptable state + ansible.builtin.fail: msg: "cluster is not in an acceptable state!" - when: (check_cluster_health.stdout | from_json).status == 'HEALTH_ERR' - - name: get the ceph quorum status - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json" + - name: Get the ceph quorum status + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json" register: check_quorum_status delegate_to: "{{ mon_host }}" + changed_when: false - - name: fail if the cluster quorum isn't in an acceptable state - fail: + - name: Fail if the cluster quorum isn't in an acceptable state + ansible.builtin.fail: msg: "cluster quorum is not in an acceptable state!" 
when: (check_quorum_status.stdout | from_json).quorum | length != groups[mon_group_name] | length - when: inventory_hostname == groups[mon_group_name] | first - - name: ensure /var/lib/ceph/bootstrap-rbd-mirror is present - file: + - name: Ensure /var/lib/ceph/bootstrap-rbd-mirror is present + ansible.builtin.file: path: /var/lib/ceph/bootstrap-rbd-mirror owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -214,7 +230,7 @@ - cephx | bool - inventory_hostname == groups[mon_group_name][0] - - name: create potentially missing keys (rbd and rbd-mirror) + - name: Create potentially missing keys (rbd and rbd-mirror) ceph_key: name: "client.{{ item.0 }}" dest: "/var/lib/ceph/{{ item.0 }}/" @@ -234,56 +250,65 @@ # NOTE: we mask the service so the RPM can't restart it # after the package gets upgraded - - name: stop ceph mon - systemd: + - name: Stop ceph mon + ansible.builtin.systemd: name: ceph-mon@{{ item }} state: stopped - enabled: no - masked: yes + enabled: false + masked: true with_items: - "{{ ansible_facts['hostname'] }}" - "{{ ansible_facts['fqdn'] }}" # only mask the service for mgr because it must be upgraded # after ALL monitors, even when collocated - - name: mask the mgr service - systemd: + - name: Mask the mgr service + ansible.builtin.systemd: name: ceph-mgr@{{ ansible_facts['hostname'] }} - masked: yes + masked: true when: inventory_hostname in groups[mgr_group_name] | default([]) or groups[mgr_group_name] | default([]) | length == 0 - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-mon role + ansible.builtin.import_role: name: ceph-mon - - name: start ceph mgr - systemd: + - name: Start ceph mgr + ansible.builtin.systemd: name: ceph-mgr@{{ ansible_facts['hostname'] }} state: started - enabled: yes - masked: no + enabled: true + masked: false when: inventory_hostname in groups[mgr_group_name] | default([]) or groups[mgr_group_name] | default([]) | length == 0 - - name: import_role ceph-facts - import_role: + - name: Import_role ceph-facts + ansible.builtin.import_role: name: ceph-facts tasks_from: set_monitor_address.yml delegate_to: "{{ groups[mon_group_name][0] }}" delegate_facts: true - - name: non container | waiting for the monitor to join the quorum... - command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json + - name: Non container | waiting for the monitor to join the quorum... 
+ ansible.builtin.command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json register: ceph_health_raw until: - ceph_health_raw.rc == 0 @@ -291,10 +316,11 @@ hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"]) retries: "{{ health_mon_check_retries }}" delay: "{{ health_mon_check_delay }}" + changed_when: false when: not containerized_deployment | bool - - name: container | waiting for the containerized monitor to join the quorum... - command: > + - name: Container | waiting for the containerized monitor to join the quorum... + ansible.builtin.command: > {{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json register: ceph_health_raw until: @@ -303,164 +329,196 @@ hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"]) retries: "{{ health_mon_check_retries }}" delay: "{{ health_mon_check_delay }}" + changed_when: false when: containerized_deployment | bool rescue: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: unmask the mon service - systemd: + - name: Unmask the mon service + ansible.builtin.systemd: name: ceph-mon@{{ ansible_facts['hostname'] }} - enabled: yes - masked: no + enabled: true + masked: false - - name: unmask the mgr service - systemd: + - name: Unmask the mgr service + ansible.builtin.systemd: name: ceph-mgr@{{ ansible_facts['hostname'] }} - masked: no + masked: false when: inventory_hostname in groups[mgr_group_name] | default([]) or groups[mgr_group_name] | default([]) | length == 0 - - name: stop the playbook execution - fail: + - name: Stop the playbook execution + ansible.builtin.fail: msg: "There was an error during monitor upgrade. Please, check the previous task results." 
-- name: reset mon_host +- name: Reset mon_host hosts: "{{ mon_group_name|default('mons') }}" tags: always - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: reset mon_host fact - set_fact: + - name: Reset mon_host fact + ansible.builtin.set_fact: mon_host: "{{ groups[mon_group_name][0] }}" -- name: upgrade ceph mgr nodes when implicitly collocated on monitors +- name: Upgrade ceph mgr nodes when implicitly collocated on monitors vars: health_mon_check_retries: 5 health_mon_check_delay: 15 - upgrade_ceph_packages: True + upgrade_ceph_packages: true hosts: "{{ mon_group_name|default('mons') }}" tags: mgrs serial: 1 - become: True + become: true gather_facts: false tasks: - - name: upgrade mgrs when no mgr group explicitly defined in inventory + - name: Upgrade mgrs when no mgr group explicitly defined in inventory when: groups.get(mgr_group_name, []) | length == 0 block: - - name: stop ceph mgr - systemd: + - name: Stop ceph mgr + ansible.builtin.systemd: name: ceph-mgr@{{ ansible_facts['hostname'] }} state: stopped - masked: yes + masked: true - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-mgr role + ansible.builtin.import_role: name: ceph-mgr -- name: upgrade ceph mgr nodes +- name: Upgrade ceph mgr nodes vars: - upgrade_ceph_packages: True + upgrade_ceph_packages: true ceph_release: "{{ ceph_stable_release }}" hosts: "{{ mgr_group_name|default('mgrs') }}" tags: mgrs serial: 1 - become: True + become: true gather_facts: false tasks: # The following task has a failed_when: false # to handle the scenario where no mgr existed before the upgrade # or if we run a Ceph cluster before Luminous - - name: stop ceph mgr - systemd: + - name: Stop ceph mgr + ansible.builtin.systemd: name: ceph-mgr@{{ ansible_facts['hostname'] }} state: stopped - enabled: no - masked: no + enabled: false + masked: false failed_when: false - - name: mask ceph mgr systemd unit - systemd: + - name: Mask ceph mgr systemd unit + ansible.builtin.systemd: name: ceph-mgr@{{ ansible_facts['hostname'] }} - masked: yes + masked: true failed_when: false - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + 
ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-mgr role + ansible.builtin.import_role: name: ceph-mgr -- name: set osd flags +- name: Set osd flags hosts: "{{ osd_group_name | default('osds') }}" tags: osds - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - name: set osd flags, disable autoscaler and balancer + - name: Set osd flags, disable autoscaler and balancer run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" block: - - name: get pool list - command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" + - name: Get pool list + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" register: pool_list changed_when: false check_mode: false - - name: get balancer module status - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" + - name: Get balancer module status + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" register: balancer_status_update run_once: true changed_when: false check_mode: false - - name: set_fact pools_pgautoscaler_mode - set_fact: + - name: Set_fact pools_pgautoscaler_mode + ansible.builtin.set_fact: pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}" with_items: "{{ pool_list.stdout | default('{}') | from_json }}" - - name: disable balancer - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" + - name: Disable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" changed_when: false when: (balancer_status_update.stdout | from_json)['active'] | bool - - name: disable pg autoscale on pools + - name: Disable pg autoscale on pools ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -473,7 +531,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: set osd flags + - name: Set osd flags ceph_osd_flag: name: "{{ item }}" cluster: "{{ cluster }}" @@ -484,58 +542,70 @@ - noout - nodeep-scrub -- name: upgrade ceph osds cluster +- name: Upgrade ceph osds cluster vars: health_osd_check_retries: 600 health_osd_check_delay: 2 - upgrade_ceph_packages: True - hosts: "{{ osd_group_name|default('osds') }}" + upgrade_ceph_packages: true + hosts: osds tags: osds serial: 1 - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - name: get osd numbers - non container - shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa 306 + - name: Get osd numbers - non container + ansible.builtin.shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa: risky-shell-pipe register: osd_ids changed_when: false - - name: set num_osds - set_fact: - num_osds: "{{ osd_ids.stdout_lines|default([])|length }}" + - name: Set num_osds + ansible.builtin.set_fact: + num_osds: "{{ osd_ids.stdout_lines | default([]) | length }}" - - 
name: set_fact container_exec_cmd_osd - set_fact: + - name: Set_fact container_exec_cmd_osd + ansible.builtin.set_fact: container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" when: containerized_deployment | bool - - name: stop ceph osd - systemd: + - name: Stop ceph osd + ansible.builtin.systemd: name: ceph-osd@{{ item }} state: stopped - enabled: no - masked: yes + enabled: false + masked: true with_items: "{{ osd_ids.stdout_lines }}" - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-osd role + ansible.builtin.import_role: name: ceph-osd - - name: scan ceph-disk osds with ceph-volume if deploying nautilus + - name: Scan ceph-disk osds with ceph-volume if deploying nautilus ceph_volume_simple_scan: cluster: "{{ cluster }}" force: true @@ -543,7 +613,7 @@ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" when: not containerized_deployment | bool - - name: activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus + - name: Activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus ceph_volume_simple_activate: cluster: "{{ cluster }}" osd_all: true @@ -551,8 +621,8 @@ CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" when: not containerized_deployment | bool - - name: waiting for clean pgs... - command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} pg stat --format json" + - name: Waiting for clean pgs... 
+ ansible.builtin.command: "{{ container_exec_cmd_update_osd | default('') }} ceph --cluster {{ cluster }} pg stat --format json" register: ceph_health_post until: > (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0) @@ -564,23 +634,26 @@ delay: "{{ health_osd_check_delay }}" -- name: complete osd upgrade +- name: Complete osd upgrade hosts: "{{ osd_group_name | default('osds') }}" tags: osds - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - name: unset osd flags, re-enable pg autoscaler and balancer + - name: Unset osd flags, re-enable pg autoscaler and balancer run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" block: - - name: re-enable pg autoscale on pools + - name: Re-enable pg autoscale on pools ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -593,7 +666,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: unset osd flags + - name: Unset osd flags ceph_osd_flag: name: "{{ item }}" cluster: "{{ cluster }}" @@ -605,29 +678,32 @@ - noout - nodeep-scrub - - name: re-enable balancer - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" + - name: Re-enable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" changed_when: false when: (balancer_status_update.stdout | from_json)['active'] | bool -- name: upgrade ceph mdss cluster, deactivate all rank > 0 +- name: Upgrade ceph mdss cluster, deactivate all rank > 0 hosts: "{{ mon_group_name | default('mons') }}[0]" tags: mdss become: true gather_facts: false tasks: - - name: deactivate all mds rank > 0 + - name: Deactivate all mds rank > 0 when: groups.get(mds_group_name, []) | length > 0 block: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - name: deactivate all mds rank > 0 if any + - name: Deactivate all mds rank > 0 if any when: groups.get(mds_group_name, []) | length > 1 block: - - name: set max_mds 1 on ceph fs + - name: Set max_mds 1 on ceph fs ceph_fs: name: "{{ cephfs }}" cluster: "{{ cluster }}" @@ -638,7 +714,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: wait until only rank 0 is up + - name: Wait until only rank 0 is up ceph_fs: name: "{{ cephfs }}" cluster: "{{ cluster }}" @@ -651,148 +727,170 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: get name of remaining active mds - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json" + - name: Get name of remaining active mds + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json" changed_when: false register: _mds_active_name - - name: set_fact mds_active_name - set_fact: + - name: Set_fact 
mds_active_name + ansible.builtin.set_fact: mds_active_name: "{{ (_mds_active_name.stdout | from_json)['filesystems'][0]['mdsmap']['info'][item.key]['name'] }}" with_dict: "{{ (_mds_active_name.stdout | default('{}') | from_json).filesystems[0]['mdsmap']['info'] | default({}) }}" - - name: set_fact mds_active_host - set_fact: + - name: Set_fact mds_active_host + ansible.builtin.set_fact: mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}" with_items: "{{ groups[mds_group_name] }}" when: hostvars[item]['ansible_facts']['hostname'] == mds_active_name - - name: create standby_mdss group - add_host: + - name: Create standby_mdss group + ansible.builtin.add_host: name: "{{ item }}" groups: standby_mdss ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}" ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}" with_items: "{{ groups[mds_group_name] | difference(mds_active_host) }}" - - name: stop standby ceph mds - systemd: + - name: Stop standby ceph mds + ansible.builtin.systemd: name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}" state: stopped - enabled: no + enabled: false delegate_to: "{{ item }}" with_items: "{{ groups['standby_mdss'] }}" when: groups['standby_mdss'] | default([]) | length > 0 # dedicated task for masking systemd unit # somehow, having a single task doesn't work in containerized context - - name: mask systemd units for standby ceph mds - systemd: + - name: Mask systemd units for standby ceph mds + ansible.builtin.systemd: name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}" - masked: yes + masked: true delegate_to: "{{ item }}" with_items: "{{ groups['standby_mdss'] }}" when: groups['standby_mdss'] | default([]) | length > 0 - - name: wait until all standbys mds are stopped - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json" + - name: Wait until all standbys mds are stopped + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json" changed_when: false register: wait_standbys_down retries: 300 delay: 5 until: (wait_standbys_down.stdout | from_json).standbys | length == 0 - - name: create active_mdss group - add_host: + - name: Create active_mdss group + ansible.builtin.add_host: name: "{{ mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0] }}" groups: active_mdss ansible_host: "{{ hostvars[mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0]]['ansible_host'] | default(omit) }}" ansible_port: "{{ hostvars[mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0]]['ansible_port'] | default(omit) }}" -- name: upgrade active mds +- name: Upgrade active mds vars: - upgrade_ceph_packages: True + upgrade_ceph_packages: true hosts: active_mdss tags: mdss become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - name: prevent restart from the packaging - systemd: + - name: Prevent restart from the packaging + ansible.builtin.systemd: name: ceph-mds@{{ ansible_facts['hostname'] }} - enabled: no - masked: yes + enabled: false + masked: true when: not containerized_deployment | bool - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + 
ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-mds role + ansible.builtin.import_role: name: ceph-mds - - name: restart ceph mds - systemd: + - name: Restart ceph mds + ansible.builtin.systemd: name: ceph-mds@{{ ansible_facts['hostname'] }} state: restarted - enabled: yes - masked: no + enabled: true + masked: false when: not containerized_deployment | bool - - name: restart active mds - command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}" + - name: Restart active mds + ansible.builtin.command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}" changed_when: false when: containerized_deployment | bool -- name: upgrade standbys ceph mdss cluster +- name: Upgrade standbys ceph mdss cluster vars: - upgrade_ceph_packages: True + upgrade_ceph_packages: true hosts: standby_mdss tags: mdss - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - name: prevent restarts from the packaging - systemd: + - name: Prevent restarts from the packaging + ansible.builtin.systemd: name: ceph-mds@{{ ansible_facts['hostname'] }} - enabled: no - masked: yes + enabled: false + masked: true when: not containerized_deployment | bool - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-mds role + ansible.builtin.import_role: name: ceph-mds - - name: set max_mds + - name: Set max_mds ceph_fs: name: "{{ cephfs }}" cluster: "{{ cluster }}" @@ -806,211 +904,266 @@ when: inventory_hostname == groups['standby_mdss'] | last -- name: upgrade ceph rgws cluster +- name: Upgrade ceph rgws cluster vars: - upgrade_ceph_packages: True + upgrade_ceph_packages: true hosts: "{{ rgw_group_name|default('rgws') }}" tags: rgws serial: 1 - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - name: stop ceph rgw when upgrading from stable-3.2 - systemd: + - name: Stop ceph rgw when upgrading from stable-3.2 # noqa: ignore-errors + ansible.builtin.systemd: name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }} state: stopped - enabled: no - masked: yes - ignore_errors: True + enabled: false + masked: true + ignore_errors: true - - name: stop ceph rgw - systemd: + - name: Stop ceph rgw + ansible.builtin.systemd: name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} state: stopped - enabled: no - masked: yes + enabled: false + masked: 
true with_items: "{{ rgw_instances }}" - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-rgw role + ansible.builtin.import_role: name: ceph-rgw -- name: upgrade ceph rbd mirror node +- name: Upgrade ceph rbd mirror node vars: - upgrade_ceph_packages: True + upgrade_ceph_packages: true hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" tags: rbdmirrors serial: 1 - become: True + become: true gather_facts: false tasks: - - name: check for ceph rbd mirror services - command: systemctl show --no-pager --property=Id --state=enabled ceph-rbd-mirror@* # noqa 303 + - name: Check for ceph rbd mirror services + ansible.builtin.command: systemctl show --no-pager --property=Id --state=enabled ceph-rbd-mirror@* # noqa command-instead-of-module changed_when: false register: rbdmirror_services - - name: stop ceph rbd mirror - service: + - name: Stop ceph rbd mirror + ansible.builtin.service: name: "{{ item.split('=')[1] }}" state: stopped - enabled: no - masked: yes + enabled: false + masked: true loop: "{{ rbdmirror_services.stdout_lines }}" - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-rbd-mirror role + ansible.builtin.import_role: name: ceph-rbd-mirror -- name: upgrade ceph nfs node +- name: Upgrade ceph nfs node vars: - upgrade_ceph_packages: True + upgrade_ceph_packages: true hosts: "{{ nfs_group_name|default('nfss') }}" tags: nfss serial: 1 - become: True + become: true gather_facts: false tasks: # failed_when: false is here so that if we upgrade # from a version of ceph that does not have nfs-ganesha # then this task will not fail - - name: stop ceph nfs - systemd: + - name: Stop ceph nfs + ansible.builtin.systemd: name: nfs-ganesha state: stopped - enabled: no - masked: yes + enabled: false + masked: true failed_when: false when: not containerized_deployment | bool - - name: systemd stop nfs container - systemd: + - name: Systemd stop nfs container + ansible.builtin.systemd: name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} state: stopped - enabled: no - masked: yes + enabled: false + masked: true failed_when: false when: - ceph_nfs_enable_service | bool - containerized_deployment | bool - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: 
name: ceph-facts - - import_role: + + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-nfs role + ansible.builtin.import_role: name: ceph-nfs -- name: upgrade ceph iscsi gateway node +- name: Upgrade ceph iscsi gateway node vars: - upgrade_ceph_packages: True + upgrade_ceph_packages: true hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}" tags: iscsigws serial: 1 - become: True + become: true gather_facts: false tasks: # failed_when: false is here so that if we upgrade # from a version of ceph that does not have iscsi gws # then this task will not fail - - name: stop ceph iscsi services - systemd: + - name: Stop ceph iscsi services + ansible.builtin.systemd: name: '{{ item }}' state: stopped - enabled: no - masked: yes + enabled: false + masked: true failed_when: false with_items: - rbd-target-api - rbd-target-gw - tcmu-runner - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: containerized_deployment | bool - - import_role: + + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config - - import_role: + + - name: Import ceph-iscsi-gw role + ansible.builtin.import_role: name: ceph-iscsi-gw -- name: upgrade ceph client node +- name: Upgrade ceph client node vars: - upgrade_ceph_packages: True + upgrade_ceph_packages: true hosts: "{{ client_group_name|default('clients') }}" tags: clients serial: "{{ client_update_batch | default(20) }}" - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml when: containerized_deployment | bool - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-common role + ansible.builtin.import_role: name: ceph-common when: not containerized_deployment | bool - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common when: - (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first) - containerized_deployment | bool -- name: upgrade ceph-crash daemons +- name: Upgrade ceph-crash daemons hosts: - "{{ mon_group_name | default('mons') }}" - "{{ osd_group_name | default('osds') }}" @@ -1024,57 +1177,66 @@ gather_facts: false become: true tasks: - - name: stop the ceph-crash service - systemd: + - name: Stop the ceph-crash service + 
ansible.builtin.systemd: name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" state: stopped # it needs to be done in a separate task otherwise the stop just before doesn't work. - - name: mask and disable the ceph-crash service - systemd: + - name: Mask and disable the ceph-crash service + ansible.builtin.systemd: name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" - enabled: no - masked: yes + enabled: false + masked: true - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-crash role + ansible.builtin.import_role: name: ceph-crash -- name: complete upgrade +- name: Complete upgrade hosts: "{{ mon_group_name | default('mons') }}" tags: post_upgrade - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - name: container | disallow pre-reef OSDs and enable all new reef-only functionality - command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release squid" + - name: Container | disallow pre-reef OSDs and enable all new reef-only functionality + ansible.builtin.command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release squid" delegate_to: "{{ groups[mon_group_name][0] }}" - run_once: True + run_once: true + changed_when: false when: - containerized_deployment | bool - groups.get(mon_group_name, []) | length > 0 - - name: non container | disallow pre-reef OSDs and enable all new reef-only functionality - command: "ceph --cluster {{ cluster }} osd require-osd-release squid" + - name: Non container | disallow pre-reef OSDs and enable all new reef-only functionality + ansible.builtin.command: "ceph --cluster {{ cluster }} osd require-osd-release squid" delegate_to: "{{ groups[mon_group_name][0] }}" - run_once: True + run_once: true + changed_when: false when: - not containerized_deployment | bool - groups.get(mon_group_name, []) | length > 0 -- name: upgrade node-exporter +- name: Upgrade node-exporter hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" @@ -1089,45 +1251,51 @@ gather_facts: false become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: with dashboard configuration + - name: With dashboard configuration when: dashboard_enabled | bool block: - - name: stop node-exporter - service: + - name: Stop node-exporter + ansible.builtin.service: name: node_exporter state: stopped failed_when: false - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + 
ansible.builtin.import_role: name: ceph-container-common tasks_from: registry when: - not containerized_deployment | bool - ceph_docker_registry_auth | bool - - import_role: + - name: Import ceph-node-exporter role + ansible.builtin.import_role: name: ceph-node-exporter -- name: upgrade monitoring node +- name: Upgrade monitoring node hosts: "{{ monitoring_group_name|default('monitoring') }}" tags: monitoring gather_facts: false become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: with dashboard configuration + - name: With dashboard configuration when: dashboard_enabled | bool block: - - name: stop monitoring services - service: + - name: Stop monitoring services + ansible.builtin.service: name: '{{ item }}' state: stopped failed_when: false @@ -1136,100 +1304,114 @@ - prometheus - grafana-server - - import_role: - name: ceph-facts - - import_role: + # - name: Import ceph-facts role + # ansible.builtin.import_role: + # name: ceph-facts + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: grafana - - import_role: + - name: Import ceph-prometheus role + ansible.builtin.import_role: name: ceph-prometheus - - import_role: + - name: Import ceph-grafana role + ansible.builtin.import_role: name: ceph-grafana -- name: upgrade ceph dashboard +- name: Upgrade ceph dashboard hosts: "{{ groups[mgr_group_name|default('mgrs')] | default(groups[mon_group_name|default('mons')]) | default(omit) }}" tags: monitoring gather_facts: false become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: with dashboard configuration + - name: With dashboard configuration when: dashboard_enabled | bool block: - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: grafana - - import_role: + + - name: Import ceph-dashboard role + ansible.builtin.import_role: name: ceph-dashboard -- name: switch any existing crush buckets to straw2 +- name: Switch any existing crush buckets to straw2 hosts: "{{ mon_group_name | default('mons') }}[0]" tags: post_upgrade become: true any_errors_fatal: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - name: set_fact ceph_cmd - set_fact: + - name: Set_fact ceph_cmd + ansible.builtin.set_fact: ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}" - - name: backup the crushmap - command: "{{ ceph_cmd }} --cluster {{ cluster }} osd getcrushmap -o /etc/ceph/{{ cluster }}-crushmap" + - name: Backup the crushmap + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd getcrushmap -o /etc/ceph/{{ cluster }}-crushmap" changed_when: false - - block: - - name: switch crush buckets to straw2 - command: "{{ ceph_cmd }} --cluster {{ cluster }} osd crush set-all-straw-buckets-to-straw2" + - name: Migrate crush buckets to straw2 + block: + - name: Switch crush buckets to straw2 + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd crush 
set-all-straw-buckets-to-straw2" changed_when: false rescue: - - name: restore the crushmap - command: "{{ ceph_cmd }} --cluster {{ cluster }} osd setcrushmap -i /etc/ceph/{{ cluster }}-crushmap" + - name: Restore the crushmap + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd setcrushmap -i /etc/ceph/{{ cluster }}-crushmap" changed_when: false - - name: inform that the switch to straw2 buckets failed - fail: + - name: Inform that the switch to straw2 buckets failed + ansible.builtin.fail: msg: > "An attempt to switch to straw2 bucket was made but failed. Check the cluster status." - - name: remove crushmap backup - file: + - name: Remove crushmap backup + ansible.builtin.file: path: /etc/ceph/{{ cluster }}-crushmap state: absent -- name: show ceph status +- name: Show ceph status hosts: "{{ mon_group_name|default('mons') }}" tags: always - become: True + become: true gather_facts: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: set_fact container_exec_cmd_status - set_fact: + - name: Set_fact container_exec_cmd_status + ansible.builtin.set_fact: container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" when: containerized_deployment | bool - - name: show ceph status - command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s" + - name: Show ceph status + ansible.builtin.command: "{{ container_exec_cmd_status | default('') }} ceph --cluster {{ cluster }} -s" changed_when: false - run_once: True + run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" - - name: show all daemons version - command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} versions" - run_once: True + - name: Show all daemons version + ansible.builtin.command: "{{ container_exec_cmd_status | default('') }} ceph --cluster {{ cluster }} versions" + run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false diff --git a/infrastructure-playbooks/shrink-mds.yml b/infrastructure-playbooks/shrink-mds.yml index 04b8e1daa8..fc9450bf69 100644 --- a/infrastructure-playbooks/shrink-mds.yml +++ b/infrastructure-playbooks/shrink-mds.yml @@ -9,35 +9,41 @@ # ansible-playbook -e ireallymeanit=yes|no shrink-mds.yml # Overrides the prompt using -e option. Can be used in # automation scripts to avoid interactive prompt. -- name: gather facts and check the init system +- name: Gather facts and check the init system hosts: - "{{ mon_group_name | default('mons') }}" - "{{ mds_group_name | default('mdss') }}" become: true tasks: - - debug: + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: msg: gather facts on all Ceph hosts for following reference - - import_role: + + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary -- name: perform checks, remove mds and print cluster health +- name: Perform checks, remove mds and print cluster health hosts: mons[0] become: true vars_prompt: - name: ireallymeanit prompt: Are you sure you want to shrink the cluster?
default: 'no' - private: no + private: false pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: exit playbook, if no mds was given + - name: Exit playbook, if no mds was given when: mds_to_kill is not defined - fail: + ansible.builtin.fail: msg: > mds_to_kill must be declared. Exiting shrink-cluster playbook, no MDS was removed. On the command @@ -45,106 +51,109 @@ "-e mds_to_kill=ceph-mds1" argument. You can only remove a single MDS each time the playbook runs." - - name: exit playbook, if the mds is not part of the inventory + - name: Exit playbook, if the mds is not part of the inventory when: mds_to_kill not in groups[mds_group_name] - fail: + ansible.builtin.fail: msg: "It seems that the host given is not part of your inventory, please make sure it is." - - name: exit playbook, if user did not mean to shrink cluster + - name: Exit playbook, if user did not mean to shrink cluster when: ireallymeanit != 'yes' - fail: + ansible.builtin.fail: msg: "Exiting shrink-mds playbook, no mds was removed. To shrink the cluster, either say 'yes' on the prompt or or use `-e ireallymeanit=yes` on the command line when invoking the playbook" - - name: set_fact container_exec_cmd for mon0 - set_fact: + - name: Set_fact container_exec_cmd for mon0 + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" when: containerized_deployment | bool - - name: exit playbook, if can not connect to the cluster - command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" changed_when: false register: ceph_health until: ceph_health is succeeded retries: 5 delay: 2 - - name: set_fact mds_to_kill_hostname - set_fact: + - name: Set_fact mds_to_kill_hostname + ansible.builtin.set_fact: mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}" tasks: # get rid of this as soon as "systemctl stop ceph-msd@$HOSTNAME" also # removes the MDS from the FS map. 
- - name: exit mds when containerized deployment - command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit" + - name: Exit mds when containerized deployment + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit" changed_when: false when: containerized_deployment | bool - - name: get ceph status - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json" + - name: Get ceph status + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json" register: ceph_status changed_when: false - - name: set_fact current_max_mds - set_fact: + - name: Set_fact current_max_mds + ansible.builtin.set_fact: current_max_mds: "{{ (ceph_status.stdout | from_json)['fsmap']['max'] }}" - - name: fail if removing that mds node wouldn't satisfy max_mds anymore - fail: + - name: Fail if removing that mds node wouldn't satisfy max_mds anymore + ansible.builtin.fail: msg: "Can't remove more mds as it won't satisfy current max_mds setting" when: - ((((ceph_status.stdout | from_json)['fsmap']['up'] | int) + ((ceph_status.stdout | from_json)['fsmap']['up:standby'] | int)) - 1) < current_max_mds | int - (ceph_status.stdout | from_json)['fsmap']['up'] | int > 1 - - name: stop mds service and verify it + - name: Stop mds service and verify it block: - - name: stop mds service - service: + - name: Stop mds service + ansible.builtin.service: name: ceph-mds@{{ mds_to_kill_hostname }} state: stopped - enabled: no + enabled: false delegate_to: "{{ mds_to_kill }}" failed_when: false - - name: ensure that the mds is stopped - command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa 303 + - name: Ensure that the mds is stopped + ansible.builtin.command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa command-instead-of-module register: mds_to_kill_status failed_when: mds_to_kill_status.rc == 0 delegate_to: "{{ mds_to_kill }}" retries: 5 delay: 2 + changed_when: false - - name: fail if the mds is reported as active or standby + - name: Fail if the mds is reported as active or standby block: - - name: get new ceph status - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json" + - name: Get new ceph status + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json" register: ceph_status + changed_when: false - - name: get active mds nodes list - set_fact: + - name: Get active mds nodes list + ansible.builtin.set_fact: active_mdss: "{{ active_mdss | default([]) + [item.name] }}" with_items: "{{ (ceph_status.stdout | from_json)['fsmap']['by_rank'] }}" - - name: get ceph fs dump status - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json" + - name: Get ceph fs dump status + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json" register: ceph_fs_status + changed_when: false - - name: create a list of standby mdss - set_fact: + - name: Create a list of standby mdss + ansible.builtin.set_fact: standby_mdss: (ceph_fs_status.stdout | from_json)['standbys'] | map(attribute='name') | list - - name: fail if mds just killed is being reported as active or standby - fail: + - name: Fail if mds just killed is being reported as active or standby + ansible.builtin.fail: msg: "mds node {{ mds_to_kill }} still up and running." 
when: - (mds_to_kill in active_mdss | default([])) or (mds_to_kill in standby_mdss | default([])) - - name: delete the filesystem when killing last mds + - name: Delete the filesystem when killing last mds ceph_fs: name: "{{ cephfs }}" cluster: "{{ cluster }}" @@ -156,13 +165,13 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: purge mds store - file: + - name: Purge mds store + ansible.builtin.file: path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_to_kill_hostname }} state: absent delegate_to: "{{ mds_to_kill }}" post_tasks: - - name: show ceph health - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" changed_when: false diff --git a/infrastructure-playbooks/shrink-mgr.yml b/infrastructure-playbooks/shrink-mgr.yml index 378ab661a4..91360b5076 100644 --- a/infrastructure-playbooks/shrink-mgr.yml +++ b/infrastructure-playbooks/shrink-mgr.yml @@ -11,62 +11,66 @@ # automation scripts to avoid interactive prompt. -- name: gather facts and check the init system +- name: Gather facts and check the init system hosts: - "{{ mon_group_name | default('mons') }}" - "{{ mgr_group_name | default('mgrs') }}" become: true tasks: - - debug: + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: msg: gather facts on all Ceph hosts for following reference -- name: confirm if user really meant to remove manager from the ceph cluster +- name: Confirm if user really meant to remove manager from the ceph cluster hosts: mons[0] become: true vars_prompt: - name: ireallymeanit prompt: Are you sure you want to shrink the cluster?
default: 'no' - private: no + private: false pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: set_fact container_exec_cmd + - name: Set_fact container_exec_cmd when: containerized_deployment | bool - set_fact: + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" - - name: exit playbook, if can not connect to the cluster - command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" register: ceph_health changed_when: false until: ceph_health is succeeded retries: 5 delay: 2 - - name: get total number of mgrs in cluster + - name: Get total number of mgrs in cluster block: - - name: save mgr dump output - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" + - name: Save mgr dump output + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" register: mgr_dump + changed_when: false - - name: get active and standbys mgr list - set_fact: + - name: Get active and standbys mgr list + ansible.builtin.set_fact: active_mgr: "{{ [mgr_dump.stdout | from_json] | map(attribute='active_name') | list }}" standbys_mgr: "{{ (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list }}" - - name: exit playbook, if there's no standby manager - fail: + - name: Exit playbook, if there's no standby manager + ansible.builtin.fail: msg: "You are about to shrink the only manager present in the cluster." when: standbys_mgr | length | int < 1 - - name: exit playbook, if no manager was given - fail: + - name: Exit playbook, if no manager was given + ansible.builtin.fail: msg: "mgr_to_kill must be declared Exiting shrink-cluster playbook, no manager was removed. On the command line when invoking the playbook, you can use @@ -74,46 +78,47 @@ manager each time the playbook runs." when: mgr_to_kill is not defined - - name: exit playbook, if user did not mean to shrink cluster - fail: + - name: Exit playbook, if user did not mean to shrink cluster + ansible.builtin.fail: msg: "Exiting shrink-mgr playbook, no manager was removed. To shrink the cluster, either say 'yes' on the prompt or or use `-e ireallymeanit=yes` on the command line when invoking the playbook" when: ireallymeanit != 'yes' - - name: set_fact mgr_to_kill_hostname - set_fact: + - name: Set_fact mgr_to_kill_hostname + ansible.builtin.set_fact: mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}" - - name: exit playbook, if the selected manager is not present in the cluster - fail: + - name: Exit playbook, if the selected manager is not present in the cluster + ansible.builtin.fail: msg: "It seems that the host given is not present in the cluster." 
when: - mgr_to_kill_hostname not in active_mgr - mgr_to_kill_hostname not in standbys_mgr tasks: - - name: stop manager services and verify it + - name: Stop manager services and verify it block: - - name: stop manager service - service: + - name: Stop manager service + ansible.builtin.service: name: ceph-mgr@{{ mgr_to_kill_hostname }} state: stopped - enabled: no + enabled: false delegate_to: "{{ mgr_to_kill }}" failed_when: false - - name: ensure that the mgr is stopped - command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa 303 + - name: Ensure that the mgr is stopped + ansible.builtin.command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa command-instead-of-module register: mgr_to_kill_status failed_when: mgr_to_kill_status.rc == 0 delegate_to: "{{ mgr_to_kill }}" + changed_when: false retries: 5 delay: 2 - - name: fail if the mgr is reported in ceph mgr dump - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" + - name: Fail if the mgr is reported in ceph mgr dump + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" register: mgr_dump changed_when: false failed_when: mgr_to_kill_hostname in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list) @@ -121,13 +126,13 @@ retries: 12 delay: 10 - - name: purge manager store - file: + - name: Purge manager store + ansible.builtin.file: path: /var/lib/ceph/mgr/{{ cluster }}-{{ mgr_to_kill_hostname }} state: absent delegate_to: "{{ mgr_to_kill }}" post_tasks: - - name: show ceph health - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" changed_when: false diff --git a/infrastructure-playbooks/shrink-mon.yml b/infrastructure-playbooks/shrink-mon.yml index 1bf1f7c46a..6746243012 100644 --- a/infrastructure-playbooks/shrink-mon.yml +++ b/infrastructure-playbooks/shrink-mon.yml @@ -12,75 +12,79 @@ # automation scripts to avoid interactive prompt. -- name: gather facts and check the init system +- name: Gather facts and check the init system hosts: "{{ mon_group_name|default('mons') }}" become: true tasks: - - debug: msg="gather facts on all Ceph hosts for following reference" + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" -- name: confirm whether user really meant to remove monitor from the ceph cluster +- name: Confirm whether user really meant to remove monitor from the ceph cluster hosts: mons[0] become: true vars_prompt: - name: ireallymeanit prompt: Are you sure you want to shrink the cluster? default: 'no' - private: no + private: false vars: mon_group_name: mons pre_tasks: - - name: exit playbook, if only one monitor is present in cluster - fail: + - name: Exit playbook, if only one monitor is present in cluster + ansible.builtin.fail: msg: "You are about to shrink the only monitor present in the cluster. If you really want to do that, please use the purge-cluster playbook."
when: groups[mon_group_name] | length | int == 1 - - name: exit playbook, if no monitor was given - fail: + - name: Exit playbook, if no monitor was given + ansible.builtin.fail: msg: "mon_to_kill must be declared Exiting shrink-cluster playbook, no monitor was removed. On the command line when invoking the playbook, you can use -e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs." when: mon_to_kill is not defined - - name: exit playbook, if the monitor is not part of the inventory - fail: + - name: Exit playbook, if the monitor is not part of the inventory + ansible.builtin.fail: msg: "It seems that the host given is not part of your inventory, please make sure it is." when: mon_to_kill not in groups[mon_group_name] - - name: exit playbook, if user did not mean to shrink cluster - fail: + - name: Exit playbook, if user did not mean to shrink cluster + ansible.builtin.fail: msg: "Exiting shrink-mon playbook, no monitor was removed. To shrink the cluster, either say 'yes' on the prompt or or use `-e ireallymeanit=yes` on the command line when invoking the playbook" when: ireallymeanit != 'yes' - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary tasks: - - name: pick a monitor different than the one we want to remove - set_fact: + - name: Pick a monitor different than the one we want to remove + ansible.builtin.set_fact: mon_host: "{{ item }}" with_items: "{{ groups[mon_group_name] }}" when: item != mon_to_kill - - name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)" - set_fact: + - name: Set container_exec_cmd fact + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}" when: containerized_deployment | bool - - name: exit playbook, if can not connect to the cluster - command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health" + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health" register: ceph_health changed_when: false until: ceph_health.stdout.find("HEALTH") > -1 @@ -88,33 +92,33 @@ retries: 5 delay: 2 - - name: set_fact mon_to_kill_hostname - set_fact: + - name: Set_fact mon_to_kill_hostname + ansible.builtin.set_fact: mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}" - - name: stop monitor service(s) - service: + - name: Stop monitor service(s) + ansible.builtin.service: name: ceph-mon@{{ mon_to_kill_hostname }} state: stopped - enabled: no + enabled: false delegate_to: "{{ mon_to_kill }}" failed_when: false - - name: purge monitor store - file: + - name: Purge monitor store + ansible.builtin.file: path: /var/lib/ceph/mon/{{ cluster }}-{{ mon_to_kill_hostname }} state: absent delegate_to: "{{ mon_to_kill }}" - - name: remove monitor from the quorum - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}" + - name: Remove monitor from the quorum + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}" changed_when: false failed_when: false delegate_to: "{{ mon_host }}" post_tasks: - - name: verify the monitor is out of the cluster - command: "{{ container_exec_cmd }} ceph 
--cluster {{ cluster }} quorum_status -f json" + - name: Verify the monitor is out of the cluster + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json" delegate_to: "{{ mon_host }}" changed_when: false failed_when: false @@ -123,25 +127,25 @@ retries: 2 delay: 10 - - name: please remove the monitor from your ceph configuration file - debug: - msg: "The monitor has been successfully removed from the cluster. - Please remove the monitor entry from the rest of your ceph configuration files, cluster wide." + - name: Please remove the monitor from your ceph configuration file + ansible.builtin.debug: + msg: "The monitor has been successfully removed from the cluster. + Please remove the monitor entry from the rest of your ceph configuration files, cluster wide." run_once: true when: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names'] - - name: fail if monitor is still part of the cluster - fail: - msg: "Monitor appears to still be part of the cluster, please check what happened." + - name: Fail if monitor is still part of the cluster + ansible.builtin.fail: + msg: "Monitor appears to still be part of the cluster, please check what happened." run_once: true when: mon_to_kill_hostname in (result.stdout | from_json)['quorum_names'] - - name: show ceph health - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s" + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s" delegate_to: "{{ mon_host }}" changed_when: false - - name: show ceph mon status - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat" + - name: Show ceph mon status + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat" delegate_to: "{{ mon_host }}" changed_when: false diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml index 09573da8bc..9114424a1e 100644 --- a/infrastructure-playbooks/shrink-osd.yml +++ b/infrastructure-playbooks/shrink-osd.yml @@ -11,102 +11,101 @@ # Overrides the prompt using -e option. Can be used in # automation scripts to avoid interactive prompt. -- name: gather facts and check the init system - +- name: Gather facts and check the init system hosts: - mons - osds - become: True + become: true tasks: - - debug: msg="gather facts on all Ceph hosts for following reference" - -- name: confirm whether user really meant to remove osd(s) from the cluster + - name: Gather facts on all Ceph hosts for following reference + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" +- name: Confirm whether user really meant to remove osd(s) from the cluster hosts: mons[0] - become: true - vars_prompt: - name: ireallymeanit prompt: Are you sure you want to shrink the cluster? default: 'no' - private: no - + private: false vars: mon_group_name: mons osd_group_name: osds pre_tasks: - - name: exit playbook, if user did not mean to shrink cluster - fail: + - name: Exit playbook, if user did not mean to shrink cluster + ansible.builtin.fail: msg: "Exiting shrink-osd playbook, no osd(s) was/were removed..
To shrink the cluster, either say 'yes' on the prompt or or use `-e ireallymeanit=yes` on the command line when invoking the playbook" when: ireallymeanit != 'yes' - - name: exit playbook, if no osd(s) was/were given - fail: + - name: Exit playbook, if no osd(s) was/were given + ansible.builtin.fail: msg: "osd_to_kill must be declared Exiting shrink-osd playbook, no OSD(s) was/were removed. On the command line when invoking the playbook, you can use -e osd_to_kill=0,1,2,3 argument." when: osd_to_kill is not defined - - name: check the osd ids passed have the correct format - fail: + - name: Check the osd ids passed have the correct format + ansible.builtin.fail: msg: "The id {{ item }} has wrong format, please pass the number only" with_items: "{{ osd_to_kill.split(',') }}" when: not item is regex("^\d+$") tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary post_tasks: - - name: set_fact container_exec_cmd build docker exec command (containerized) - set_fact: + - name: Set_fact container_exec_cmd build docker exec command (containerized) + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" when: containerized_deployment | bool - - name: exit playbook, if can not connect to the cluster - command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health" + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health" register: ceph_health changed_when: false until: ceph_health.stdout.find("HEALTH") > -1 retries: 5 delay: 2 - - name: find the host(s) where the osd(s) is/are running on - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}" + - name: Find the host(s) where the osd(s) is/are running on + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}" changed_when: false with_items: "{{ osd_to_kill.split(',') }}" register: find_osd_hosts - - name: set_fact osd_hosts - set_fact: - osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item ] ] }}" + - name: Set_fact osd_hosts + ansible.builtin.set_fact: + osd_hosts: "{{ osd_hosts | default([]) + [[(item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item]] }}" with_items: "{{ find_osd_hosts.results }}" - - name: set_fact _osd_hosts - set_fact: + - name: Set_fact _osd_hosts + ansible.builtin.set_fact: _osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2, item.3 ] ] }}" with_nested: - "{{ groups.get(osd_group_name) }}" - "{{ osd_hosts }}" when: hostvars[item.0]['ansible_facts']['hostname'] == item.1 - - name: set_fact host_list - set_fact: + - name: Set_fact host_list + ansible.builtin.set_fact: host_list: "{{ host_list | default([]) | union([item.0]) }}" loop: "{{ _osd_hosts }}" - - name: get ceph-volume lvm list data + - name: Get ceph-volume lvm list data ceph_volume: cluster: "{{ cluster }}" action: list @@ -117,12 +116,12 @@ delegate_to: "{{ item }}" loop: "{{ host_list }}" - - name: set_fact _lvm_list - set_fact: + - name: Set_fact _lvm_list + ansible.builtin.set_fact: _lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}" with_items: "{{ _lvm_list_data.results 
}}" - - name: refresh /etc/ceph/osd files non containerized_deployment + - name: Refresh /etc/ceph/osd files non containerized_deployment ceph_volume_simple_scan: cluster: "{{ cluster }}" force: true @@ -130,8 +129,8 @@ loop: "{{ host_list }}" when: not containerized_deployment | bool - - name: get osd unit status - systemd: + - name: Get osd unit status + ansible.builtin.systemd: name: ceph-osd@{{ item.2 }} register: osd_status delegate_to: "{{ item.0 }}" @@ -139,8 +138,8 @@ when: - containerized_deployment | bool - - name: refresh /etc/ceph/osd files containerized_deployment - command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" + - name: Refresh /etc/ceph/osd files containerized_deployment + ansible.builtin.command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" changed_when: false delegate_to: "{{ item.0 }}" loop: "{{ _osd_hosts }}" @@ -149,10 +148,13 @@ - item.2 not in _lvm_list.keys() - osd_status.results[0].status.ActiveState == 'active' - - name: refresh /etc/ceph/osd files containerized_deployment when OSD container is down + - name: Refresh /etc/ceph/osd files containerized_deployment when OSD container is down + when: + - containerized_deployment | bool + - osd_status.results[0].status.ActiveState != 'active' block: - - name: create tmp osd folder - file: + - name: Create tmp osd folder + ansible.builtin.file: path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }} state: directory mode: '0755' @@ -160,8 +162,8 @@ when: item.2 not in _lvm_list.keys() loop: "{{ _osd_hosts }}" - - name: activate OSD - command: | + - name: Activate OSD + ansible.builtin.command: | {{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1 -v /dev:/dev -v /etc/localtime:/etc/localtime:ro -v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared @@ -179,8 +181,8 @@ when: item.2 not in _lvm_list.keys() loop: "{{ _osd_hosts }}" - - name: simple scan - command: | + - name: Simple scan + ansible.builtin.command: | {{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1 -v /dev:/dev -v /etc/localtime:/etc/localtime:ro -v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared @@ -198,28 +200,24 @@ when: item.2 not in _lvm_list.keys() loop: "{{ _osd_hosts }}" - - name: umount OSD temp folder - mount: + - name: Umount OSD temp folder + ansible.posix.mount: path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }} state: unmounted delegate_to: "{{ item.0 }}" when: item.2 not in _lvm_list.keys() loop: "{{ _osd_hosts }}" - - name: remove OSD temp folder - file: + - name: Remove OSD temp folder + ansible.builtin.file: path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }} state: absent delegate_to: "{{ item.0 }}" when: item.2 not in _lvm_list.keys() loop: "{{ _osd_hosts }}" - when: - - containerized_deployment | bool - - osd_status.results[0].status.ActiveState != 'active' - - - name: find /etc/ceph/osd files - find: + - name: Find /etc/ceph/osd files + ansible.builtin.find: paths: /etc/ceph/osd pattern: "{{ item.2 }}-*" register: ceph_osd_data @@ -227,8 +225,8 @@ loop: "{{ _osd_hosts }}" when: item.2 not in _lvm_list.keys() - - name: slurp ceph osd files content - slurp: + - name: Slurp ceph osd files content + ansible.builtin.slurp: src: "{{ item['files'][0]['path'] }}" delegate_to: "{{ item.item.0 }}" register: ceph_osd_files_content @@ -237,13 +235,13 @@ - item.skipped 
is undefined - item.matched > 0 - - name: set_fact ceph_osd_files_json - set_fact: + - name: Set_fact ceph_osd_files_json + ansible.builtin.set_fact: ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({ item.item.item.2: item.content | b64decode | from_json}) }}" with_items: "{{ ceph_osd_files_content.results }}" when: item.skipped is undefined - - name: mark osd(s) out of the cluster + - name: Mark osd(s) out of the cluster ceph_osd: ids: "{{ osd_to_kill.split(',') }}" cluster: "{{ cluster }}" @@ -253,15 +251,15 @@ CEPH_CONTAINER_BINARY: "{{ container_binary }}" run_once: true - - name: stop osd(s) service - service: + - name: Stop osd(s) service + ansible.builtin.service: name: ceph-osd@{{ item.2 }} state: stopped - enabled: no + enabled: false loop: "{{ _osd_hosts }}" delegate_to: "{{ item.0 }}" - - name: umount osd lockbox + - name: Umount osd lockbox ansible.posix.mount: path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}" state: absent @@ -273,7 +271,7 @@ - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool - ceph_osd_data_json[item.2]['data']['uuid'] is defined - - name: umount osd data + - name: Umount osd data ansible.posix.mount: path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" state: absent @@ -281,36 +279,38 @@ delegate_to: "{{ item.0 }}" when: not containerized_deployment | bool - - name: get parent device for data partition - command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}" + - name: Get parent device for data partition + ansible.builtin.command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}" register: parent_device_data_part loop: "{{ _osd_hosts }}" delegate_to: "{{ item.0 }}" + changed_when: false when: - item.2 not in _lvm_list.keys() - ceph_osd_data_json[item.2]['data']['path'] is defined - - name: add pkname information in ceph_osd_data_json - set_fact: - ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout }}, recursive=True) }}" + - name: Add pkname information in ceph_osd_data_json + ansible.builtin.set_fact: + ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout}}, recursive=True) }}" loop: "{{ parent_device_data_part.results }}" when: item.skipped is undefined - - name: close dmcrypt close on devices if needed - command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}" + - name: Close dmcrypt close on devices if needed + ansible.builtin.command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}" with_nested: - "{{ _osd_hosts }}" - - [ 'block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt' ] + - ['block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt'] delegate_to: "{{ item.0 }}" failed_when: false register: result until: result is succeeded + changed_when: false when: - item.2 not in _lvm_list.keys() - ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool - ceph_osd_data_json[item.2][item.3] is defined - - name: use ceph-volume lvm zap to destroy all partitions + - name: Use ceph-volume lvm zap to destroy all partitions ceph_volume: cluster: "{{ cluster }}" action: zap @@ -321,7 +321,7 @@ CEPH_CONTAINER_BINARY: "{{ container_binary }}" with_nested: - "{{ _osd_hosts }}" - - [ 'block', 'block.db', 'block.wal', 'journal', 'data' ] + - ['block', 'block.db', 'block.wal', 
'journal', 'data'] delegate_to: "{{ item.0 }}" failed_when: false register: result @@ -329,7 +329,7 @@ - item.2 not in _lvm_list.keys() - ceph_osd_data_json[item.2][item.3] is defined - - name: zap osd devices + - name: Zap osd devices ceph_volume: action: "zap" osd_fsid: "{{ item.1 }}" @@ -341,7 +341,7 @@ loop: "{{ _osd_hosts }}" when: item.2 in _lvm_list.keys() - - name: ensure osds are marked down + - name: Ensure osds are marked down ceph_osd: ids: "{{ osd_to_kill.split(',') }}" cluster: "{{ cluster }}" @@ -352,7 +352,7 @@ run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" - - name: purge osd(s) from the cluster + - name: Purge osd(s) from the cluster ceph_osd: ids: "{{ item }}" cluster: "{{ cluster }}" @@ -363,17 +363,17 @@ run_once: true with_items: "{{ osd_to_kill.split(',') }}" - - name: remove osd data dir - file: + - name: Remove osd data dir + ansible.builtin.file: path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}" state: absent loop: "{{ _osd_hosts }}" delegate_to: "{{ item.0 }}" - - name: show ceph health - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s" + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s" changed_when: false - - name: show ceph osd tree - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree" + - name: Show ceph osd tree + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree" changed_when: false diff --git a/infrastructure-playbooks/shrink-rbdmirror.yml b/infrastructure-playbooks/shrink-rbdmirror.yml index 79f8327d0c..dd43080a5d 100644 --- a/infrastructure-playbooks/shrink-rbdmirror.yml +++ b/infrastructure-playbooks/shrink-rbdmirror.yml @@ -11,34 +11,37 @@ # Overrides the prompt using -e option. Can be used in # automation scripts to avoid interactive prompt. -- name: gather facts and check the init system +- name: Gather facts and check the init system hosts: - mons - rbdmirrors become: true tasks: - - debug: + - name: Gather facts on MONs and RBD mirrors + ansible.builtin.debug: msg: gather facts on MONs and RBD mirrors -- name: confirm whether user really meant to remove rbd mirror from the ceph +- name: Confirm whether user really meant to remove rbd mirror from the ceph cluster hosts: mons[0] become: true vars_prompt: - name: ireallymeanit prompt: Are you sure you want to shrink the cluster? default: 'no' - private: no + private: false pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: exit playbook, if no rbdmirror was given - fail: + - name: Exit playbook, if no rbdmirror was given + ansible.builtin.fail: msg: "rbdmirror_to_kill must be declared Exiting shrink-cluster playbook, no RBD mirror was removed. On the command line when invoking the playbook, you can use @@ -46,68 +49,68 @@ single rbd mirror each time the playbook runs." when: rbdmirror_to_kill is not defined - - name: exit playbook, if the rbdmirror is not part of the inventory - fail: + - name: Exit playbook, if the rbdmirror is not part of the inventory + ansible.builtin.fail: msg: > It seems that the host given is not part of your inventory, please make sure it is.
when: rbdmirror_to_kill not in groups[rbdmirror_group_name] - - name: exit playbook, if user did not mean to shrink cluster - fail: + - name: Exit playbook, if user did not mean to shrink cluster + ansible.builtin.fail: msg: "Exiting shrink-rbdmirror playbook, no rbd-mirror was removed. To shrink the cluster, either say 'yes' on the prompt or or use `-e ireallymeanit=yes` on the command line when invoking the playbook" when: ireallymeanit != 'yes' - - name: set_fact container_exec_cmd for mon0 + - name: Set_fact container_exec_cmd for mon0 when: containerized_deployment | bool - set_fact: + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" - - name: exit playbook, if can not connect to the cluster - command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json" + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json" register: ceph_health changed_when: false until: ceph_health is succeeded retries: 5 delay: 2 - - name: set_fact rbdmirror_to_kill_hostname - set_fact: + - name: Set_fact rbdmirror_to_kill_hostname + ansible.builtin.set_fact: rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}" - - name: set_fact rbdmirror_gids - set_fact: - rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [ item ] }}" + - name: Set_fact rbdmirror_gids + ansible.builtin.set_fact: + rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [item] }}" with_items: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list }}" when: item != 'summary' - - name: set_fact rbdmirror_to_kill_gid - set_fact: + - name: Set_fact rbdmirror_to_kill_gid + ansible.builtin.set_fact: rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['gid'] }}" with_items: "{{ rbdmirror_gids }}" when: (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname tasks: - - name: stop rbdmirror service - service: + - name: Stop rbdmirror service + ansible.builtin.service: name: ceph-rbd-mirror@rbd-mirror.{{ rbdmirror_to_kill_hostname }} state: stopped - enabled: no + enabled: false delegate_to: "{{ rbdmirror_to_kill }}" failed_when: false - - name: purge related directories - file: + - name: Purge related directories + ansible.builtin.file: path: /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}-{{ rbdmirror_to_kill_hostname }} state: absent delegate_to: "{{ rbdmirror_to_kill }}" post_tasks: - - name: get servicemap details - command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json" + - name: Get servicemap details + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json" register: ceph_health failed_when: - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list" @@ -115,10 +118,11 @@ until: - "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list" - rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list + changed_when: false when: rbdmirror_to_kill_gid is defined retries: 12 delay: 10 - - name: show ceph health - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + - name: Show ceph 
health + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" changed_when: false diff --git a/infrastructure-playbooks/shrink-rgw.yml b/infrastructure-playbooks/shrink-rgw.yml index b416289752..de47b3f983 100644 --- a/infrastructure-playbooks/shrink-rgw.yml +++ b/infrastructure-playbooks/shrink-rgw.yml @@ -11,19 +11,19 @@ # automation scripts to avoid interactive prompt. -- name: confirm whether user really meant to remove rgw from the ceph cluster +- name: Confirm whether user really meant to remove rgw from the ceph cluster hosts: localhost become: false gather_facts: false vars_prompt: - - name: ireallymeanit + - name: ireallymeanit  # noqa: name[casing] prompt: Are you sure you want to shrink the cluster? default: 'no' - private: no + private: false tasks: - - name: exit playbook, if no rgw was given + - name: Exit playbook, if no rgw was given when: rgw_to_kill is not defined or rgw_to_kill | length == 0 - fail: + ansible.builtin.fail: msg: > rgw_to_kill must be declared. Exiting shrink-cluster playbook, no RGW was removed. On the command @@ -31,82 +31,85 @@ "-e rgw_to_kill=ceph.rgw0 argument". You can only remove a single RGW each time the playbook runs. - - name: exit playbook, if user did not mean to shrink cluster + - name: Exit playbook, if user did not mean to shrink cluster when: ireallymeanit != 'yes' - fail: + ansible.builtin.fail: msg: > Exiting shrink-mon playbook, no monitor was removed. To shrink the cluster, either say 'yes' on the prompt or use '-e ireallymeanit=yes' on the command line when invoking the playbook -- name: gather facts and mons and rgws +- name: Gather facts on mons and rgws hosts: - "{{ mon_group_name | default('mons') }}[0]" - "{{ rgw_group_name | default('rgws') }}" become: true gather_facts: false tasks: - - name: gather facts - setup: + - name: Gather facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' -- hosts: mons[0] +- name: Shrink rgw service + hosts: mons[0] become: true gather_facts: false pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary - - name: set_fact container_exec_cmd for mon0 - set_fact: + - name: Set_fact container_exec_cmd for mon0 + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" when: containerized_deployment | bool - - name: exit playbook, if can not connect to the cluster - command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" register: ceph_health changed_when: false until: ceph_health is succeeded retries: 5 delay: 2 - - name: get rgw instances - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json" + - name: Get rgw instances + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json" register: rgw_instances changed_when: false - - name: exit playbook, if the rgw_to_kill doesn't exist + - name: Exit playbook, if the rgw_to_kill doesn't exist when: rgw_to_kill not in (rgw_instances.stdout | from_json).services.rgw.daemons.keys() | list - fail: + ansible.builtin.fail: msg: > It seems that the rgw instance 
given is not part of the ceph cluster. Please make sure it is. The rgw instance format is $(hostname}.rgw$(instance number). tasks: - - name: get rgw host running the rgw instance to kill - set_fact: + - name: Get rgw host running the rgw instance to kill + ansible.builtin.set_fact: rgw_host: '{{ item }}' with_items: '{{ groups[rgw_group_name] }}' when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0] - - name: stop rgw service - service: + - name: Stop rgw service + ansible.builtin.service: name: ceph-radosgw@rgw.{{ rgw_to_kill }} state: stopped - enabled: no + enabled: false delegate_to: "{{ rgw_host }}" failed_when: false - - name: ensure that the rgw is stopped - command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}" # noqa 303 + - name: Ensure that the rgw is stopped + ansible.builtin.command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}" # noqa command-instead-of-module register: rgw_to_kill_status failed_when: rgw_to_kill_status.rc == 0 changed_when: false @@ -114,8 +117,8 @@ retries: 5 delay: 2 - - name: exit if rgw_to_kill is reported in ceph status - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json" + - name: Exit if rgw_to_kill is reported in ceph status + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json" register: ceph_status changed_when: false failed_when: @@ -127,12 +130,12 @@ retries: 3 delay: 3 - - name: purge directories related to rgw - file: + - name: Purge directories related to rgw + ansible.builtin.file: path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_to_kill }} state: absent delegate_to: "{{ rgw_host }}" post_tasks: - - name: show ceph health - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + - name: Show ceph health + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" changed_when: false diff --git a/infrastructure-playbooks/storage-inventory.yml b/infrastructure-playbooks/storage-inventory.yml index 00eb3f29bd..988d3078ae 100644 --- a/infrastructure-playbooks/storage-inventory.yml +++ b/infrastructure-playbooks/storage-inventory.yml @@ -5,26 +5,23 @@ # Usage: # ansible-playbook storage-inventory.yml -- name: gather facts and check the init system - - hosts: "{{ osd_group_name|default('osds') }}" - +- name: Gather facts and check the init system + hosts: osds become: true - tasks: - - debug: msg="gather facts on all Ceph hosts for following reference" - -- name: query each host for storage device inventory - - hosts: "{{ osd_group_name|default('osds') }}" + - name: Gather facts on all Ceph hosts + ansible.builtin.debug: + msg: "gather facts on all Ceph hosts for following reference" +- name: Query each host for storage device inventory + hosts: osds become: true - tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: list storage inventory + - name: List storage inventory ceph_volume: action: "inventory" environment: diff --git a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml index 45d4b5c49e..ad92baec39 100644 --- a/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml +++ b/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml @@ -1,29 +1,30 @@ --- # 
This playbook switches from non-containerized to containerized Ceph daemons -- name: confirm whether user really meant to switch from non-containerized to containerized ceph daemons +- name: Confirm whether user really meant to switch from non-containerized to containerized ceph daemons hosts: localhost gather_facts: false any_errors_fatal: true vars_prompt: - - name: ireallymeanit + - name: ireallymeanit  # noqa: name[casing] prompt: Are you sure you want to switch from non-containerized to containerized ceph daemons? default: 'no' - private: no + private: false tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: fail when less than three monitors - fail: + - name: Fail when less than three monitors + ansible.builtin.fail: msg: "This playbook requires at least three monitors." when: groups[mon_group_name] | length | int < 3 - - name: exit playbook, if user did not mean to switch from non-containerized to containerized daemons? - fail: + - name: Exit playbook, if user did not mean to switch from non-containerized to containerized daemons? + ansible.builtin.fail: msg: > "Exiting switch-from-non-containerized-to-containerized-ceph-daemons.yml playbook, cluster did not switch from non-containerized to containerized ceph daemons. @@ -33,7 +34,7 @@ when: ireallymeanit != 'yes' -- name: gather facts +- name: Gather facts hosts: - "{{ mon_group_name|default('mons') }}" @@ -47,52 +48,57 @@ become: true vars: - delegate_facts_host: True + delegate_facts_host: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: gather and delegate facts - setup: + - name: Gather and delegate facts + ansible.builtin.setup: gather_subset: - 'all' - '!facter' - '!ohai' delegate_to: "{{ item }}" - delegate_facts: True + delegate_facts: true with_items: "{{ groups['all'] | difference(groups.get(client_group_name, [])) }}" run_once: true when: delegate_facts_host | bool tags: always - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + + - name: Import ceph-validate role + ansible.builtin.import_role: name: ceph-validate -- name: switching from non-containerized to containerized ceph mon +- name: Switching from non-containerized to containerized ceph mon vars: containerized_deployment: true - switch_to_containers: True - mon_group_name: mons + switch_to_containers: true + mon_group_name: mons hosts: "{{ mon_group_name|default('mons') }}" serial: 1 become: true pre_tasks: - - name: select a running monitor - set_fact: mon_host={{ item }} + - name: Select a running monitor + ansible.builtin.set_fact: + mon_host: "{{ item }}" with_items: "{{ groups[mon_group_name] }}" when: item != inventory_hostname - - name: stop non-containerized ceph mon - service: + - name: Stop non-containerized ceph mon + ansible.builtin.service: name: "ceph-mon@{{ ansible_facts['hostname'] }}" state: stopped - enabled: no + enabled: false - - name: remove old systemd unit files - file: + - name: Remove old systemd unit files + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -101,61 +107,67 @@ - /lib/systemd/system/ceph-mon@.service - /lib/systemd/system/ceph-mon.target - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts # NOTE: changed from file module to raw find command for performance reasons # The 
file module has to run checks on current ownership of all directories and files. This is unnecessary # as in this case we know we want all owned by ceph user - - name: set proper ownership on ceph directories - command: "find /var/lib/ceph/mon /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/mon /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" changed_when: false - - name: check for existing old leveldb file extension (ldb) - shell: stat /var/lib/ceph/mon/*/store.db/*.ldb + - name: Check for existing old leveldb file extension (ldb) + ansible.builtin.shell: stat /var/lib/ceph/mon/*/store.db/*.ldb changed_when: false failed_when: false register: ldb_files - - name: rename leveldb extension from ldb to sst - shell: rename -v .ldb .sst /var/lib/ceph/mon/*/store.db/*.ldb + - name: Rename leveldb extension from ldb to sst + ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/mon/*/store.db/*.ldb changed_when: false failed_when: false when: ldb_files.rc == 0 - - name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common - command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring + - name: Copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common + ansible.builtin.command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring args: creates: /etc/ceph/{{ cluster }}.mon.keyring changed_when: false failed_when: false tasks: - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common - - import_role: + - name: Import ceph-mon role + ansible.builtin.import_role: name: ceph-mon post_tasks: - - name: waiting for the monitor to join the quorum... - command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json" + - name: Waiting for the monitor to join the quorum... 
+ ansible.builtin.command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json" register: ceph_health_raw until: ansible_facts['hostname'] in (ceph_health_raw.stdout | trim | from_json)["quorum_names"] changed_when: false retries: "{{ health_mon_check_retries }}" delay: "{{ health_mon_check_delay }}" -- name: switching from non-containerized to containerized ceph mgr +- name: Switching from non-containerized to containerized ceph mgr hosts: "{{ mgr_group_name|default('mgrs') }}" @@ -169,15 +181,15 @@ # failed_when: false is here because if we're # working with a jewel cluster then ceph mgr # will not exist - - name: stop non-containerized ceph mgr(s) - service: + - name: Stop non-containerized ceph mgr(s) + ansible.builtin.service: name: "ceph-mgr@{{ ansible_facts['hostname'] }}" state: stopped - enabled: no + enabled: false failed_when: false - - name: remove old systemd unit files - file: + - name: Remove old systemd unit files + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -186,66 +198,75 @@ - /lib/systemd/system/ceph-mgr@.service - /lib/systemd/system/ceph-mgr.target - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts # NOTE: changed from file module to raw find command for performance reasons # The file module has to run checks on current ownership of all directories and files. This is unnecessary # as in this case we know we want all owned by ceph user - - name: set proper ownership on ceph directories - command: "find /var/lib/ceph/mgr /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/mgr /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" changed_when: false tasks: - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common - - import_role: + - name: Import ceph-mgr role + ansible.builtin.import_role: name: ceph-mgr -- name: set osd flags +- name: Set osd flags hosts: "{{ mon_group_name | default('mons') }}[0]" - become: True + become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - name: get pool list - command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" + - name: Get pool list + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" register: pool_list changed_when: false check_mode: false - - name: get balancer module status - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" + - name: Get balancer module status + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" register: balancer_status_switch changed_when: false 
check_mode: false - - name: set_fact pools_pgautoscaler_mode - set_fact: + - name: Set_fact pools_pgautoscaler_mode + ansible.builtin.set_fact: pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}" with_items: "{{ pool_list.stdout | default('{}') | from_json }}" - - name: disable balancer - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" + - name: Disable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" changed_when: false when: (balancer_status_switch.stdout | from_json)['active'] | bool - - name: disable pg autoscale on pools + - name: Disable pg autoscale on pools ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -258,7 +279,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: set osd flags + - name: Set osd flags ceph_osd_flag: name: "{{ item }}" cluster: "{{ cluster }}" @@ -270,12 +291,12 @@ - nodeep-scrub -- name: switching from non-containerized to containerized ceph osd +- name: Switching from non-containerized to containerized ceph osd vars: containerized_deployment: true osd_group_name: osds - switch_to_containers: True + switch_to_containers: true hosts: "{{ osd_group_name|default('osds') }}" @@ -283,11 +304,12 @@ become: true pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: collect running osds - shell: | + - name: Collect running osds + ansible.builtin.shell: | set -o pipefail; systemctl list-units | grep -E "loaded * active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-volume' register: running_osds @@ -295,28 +317,28 @@ failed_when: false # systemd module does not support --runtime option - - name: disable ceph-osd@.service runtime-enabled - command: "systemctl disable --runtime {{ item }}" # noqa 303 + - name: Disable ceph-osd@.service runtime-enabled + ansible.builtin.command: "systemctl disable --runtime {{ item }}" # noqa command-instead-of-module changed_when: false failed_when: false with_items: "{{ running_osds.stdout_lines | default([]) }}" when: item.startswith('ceph-osd@') - - name: stop/disable/mask non-containerized ceph osd(s) (if any) - systemd: + - name: Stop/disable/mask non-containerized ceph osd(s) (if any) + ansible.builtin.systemd: name: "{{ item }}" state: stopped - enabled: no + enabled: false with_items: "{{ running_osds.stdout_lines | default([]) }}" when: running_osds != [] - - name: disable ceph.target - systemd: + - name: Disable ceph.target + ansible.builtin.systemd: name: ceph.target - enabled: no + enabled: false - - name: remove old ceph-osd systemd units - file: + - name: Remove old ceph-osd systemd units + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -327,44 +349,45 @@ - /lib/systemd/system/ceph-osd@.service - /lib/systemd/system/ceph-volume@.service - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts # NOTE: changed from file module to raw find command for performance reasons # The file module has to run checks on current ownership of all directories and files. 
This is unnecessary # as in this case we know we want all owned by ceph user - - name: set proper ownership on ceph directories - command: "find /var/lib/ceph/osd /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/osd /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" changed_when: false - - name: check for existing old leveldb file extension (ldb) - shell: stat /var/lib/ceph/osd/*/current/omap/*.ldb + - name: Check for existing old leveldb file extension (ldb) + ansible.builtin.shell: stat /var/lib/ceph/osd/*/current/omap/*.ldb changed_when: false failed_when: false register: ldb_files - - name: rename leveldb extension from ldb to sst - shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb + - name: Rename leveldb extension from ldb to sst + ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb changed_when: false failed_when: false when: ldb_files.rc == 0 - - name: check if containerized osds are already running - command: > + - name: Check if containerized osds are already running + ansible.builtin.command: > {{ container_binary }} ps -q --filter='name=ceph-osd' changed_when: false failed_when: false register: osd_running - - name: get osd directories - command: > + - name: Get osd directories + ansible.builtin.command: > find /var/lib/ceph/osd {% if dmcrypt | bool %}/var/lib/ceph/osd-lockbox{% endif %} -maxdepth 1 -mindepth 1 -type d register: osd_dirs changed_when: false failed_when: false - - name: unmount all the osd directories - command: > + - name: Unmount all the osd directories + ansible.builtin.command: > umount {{ item }} changed_when: false failed_when: false @@ -372,21 +395,25 @@ when: osd_running.rc != 0 or osd_running.stdout_lines | length == 0 tasks: - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common - - import_role: + - name: Import ceph-osd role + ansible.builtin.import_role: name: ceph-osd post_tasks: - - name: container - waiting for clean pgs... - command: > + - name: Container - waiting for clean pgs... 
+ ansible.builtin.command: > {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json register: ceph_health_post until: > @@ -399,17 +426,20 @@ changed_when: false -- name: unset osd flags +- name: Unset osd flags hosts: "{{ mon_group_name | default('mons') }}[0]" - become: True + become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - name: re-enable pg autoscale on pools + - name: Re-enable pg autoscale on pools ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -422,7 +452,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: unset osd flags + - name: Unset osd flags ceph_osd_flag: name: "{{ item }}" cluster: "{{ cluster }}" @@ -434,13 +464,13 @@ - noout - nodeep-scrub - - name: re-enable balancer - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" + - name: Re-enable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" changed_when: false when: (balancer_status_switch.stdout | from_json)['active'] | bool -- name: switching from non-containerized to containerized ceph mds +- name: Switching from non-containerized to containerized ceph mds hosts: "{{ mds_group_name|default('mdss') }}" @@ -452,14 +482,14 @@ become: true pre_tasks: - - name: stop non-containerized ceph mds(s) - service: + - name: Stop non-containerized ceph mds(s) + ansible.builtin.service: name: "ceph-mds@{{ ansible_facts['hostname'] }}" state: stopped - enabled: no + enabled: false - - name: remove old systemd unit files - file: + - name: Remove old systemd unit files + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -468,34 +498,40 @@ - /lib/systemd/system/ceph-mds@.service - /lib/systemd/system/ceph-mds.target - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts # NOTE: changed from file module to raw find command for performance reasons # The file module has to run checks on current ownership of all directories and files. 
This is unnecessary # as in this case we know we want all owned by ceph user - - name: set proper ownership on ceph directories - command: "find /var/lib/ceph/mds /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/mds /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" changed_when: false tasks: - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common - - import_role: - name: ceph-mds + - name: Import ceph-mds role + ansible.builtin.import_role: + name: ceph-mds -- name: switching from non-containerized to containerized ceph rgw +- name: Switching from non-containerized to containerized ceph rgw hosts: "{{ rgw_group_name|default('rgws') }}" @@ -506,33 +542,36 @@ serial: 1 become: true pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + - name: Import ceph-config role + ansible.builtin.import_role: name: ceph-config tasks_from: rgw_systemd_environment_file.yml # NOTE: changed from file module to raw find command for performance reasons # The file module has to run checks on current ownership of all directories and files. This is unnecessary # as in this case we know we want all owned by ceph user - - name: set proper ownership on ceph directories - command: "find /var/lib/ceph/radosgw /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph/radosgw /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" changed_when: false tasks: - - name: stop non-containerized ceph rgw(s) - service: + - name: Stop non-containerized ceph rgw(s) + ansible.builtin.service: name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" state: stopped - enabled: no + enabled: false with_items: "{{ rgw_instances }}" - - name: remove old systemd unit files - file: + - name: Remove old systemd unit files + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -541,20 +580,24 @@ - /lib/systemd/system/ceph-radosgw@.service - /lib/systemd/system/ceph-radosgw.target - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common - - import_role: + - name: Import ceph-rgw role + ansible.builtin.import_role: name: ceph-rgw -- name: switching from non-containerized to containerized ceph rbd-mirror +- name: Switching from non-containerized to containerized ceph rbd-mirror hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}" @@ -565,21 +608,21 @@ serial: 1 become: true pre_tasks: - - name: check for ceph 
rbd mirror services - command: systemctl show --no-pager --property=Id ceph-rbd-mirror@* # noqa 303 + - name: Check for ceph rbd mirror services + ansible.builtin.command: systemctl show --no-pager --property=Id ceph-rbd-mirror@* # noqa: command-instead-of-module changed_when: false register: rbdmirror_services - - name: stop non-containerized ceph rbd mirror(s) - service: + - name: Stop non-containerized ceph rbd mirror(s) # noqa: ignore-errors + ansible.builtin.service: name: "{{ item.split('=')[1] }}" state: stopped - enabled: no + enabled: false ignore_errors: true loop: "{{ rbdmirror_services.stdout_lines }}" - - name: remove old systemd unit files - file: + - name: Remove old systemd unit files + ansible.builtin.file: path: "{{ item }}" state: absent with_items: @@ -588,34 +631,40 @@ - /lib/systemd/system/ceph-rbd-mirror@.service - /lib/systemd/system/ceph-rbd-mirror.target - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts # NOTE: changed from file module to raw find command for performance reasons # The file module has to run checks on current ownership of all directories and files. This is unnecessary # as in this case we know we want all owned by ceph user - - name: set proper ownership on ceph directories - command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" changed_when: false tasks: - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common - - import_role: + - name: Import ceph-rbd-mirror role + ansible.builtin.import_role: name: ceph-rbd-mirror -- name: switching from non-containerized to containerized ceph nfs +- name: Switching from non-containerized to containerized ceph nfs hosts: "{{ nfs_group_name|default('nfss') }}" @@ -630,40 +679,46 @@ # failed_when: false is here because if we're # working with a jewel cluster then ceph nfs # will not exist - - name: stop non-containerized ceph nfs(s) - service: + - name: Stop non-containerized ceph nfs(s) + ansible.builtin.service: name: nfs-ganesha state: stopped - enabled: no + enabled: false failed_when: false - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts # NOTE: changed from file module to raw find command for performance reasons # The file module has to run checks on current ownership of all directories and files. 
This is unnecessary # as in this case we know we want all owned by ceph user - - name: set proper ownership on ceph directories - command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" changed_when: false tasks: - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common - - import_role: + - name: Import ceph-nfs role + ansible.builtin.import_role: name: ceph-nfs -- name: switching from non-containerized to containerized iscsigws +- name: Switching from non-containerized to containerized iscsigws hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}" vars: containerized_deployment: true @@ -671,21 +726,22 @@ become: true serial: 1 pre_tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - name: stop iscsigw services - service: + - name: Stop iscsigw services + ansible.builtin.service: name: "{{ item }}" state: stopped - enabled: no + enabled: false with_items: - tcmu-runner - rbd-target-gw - rbd-target-api - - name: remove old systemd unit files - file: + - name: Remove old systemd unit files + ansible.builtin.file: path: "/usr/lib/systemd/system/{{ item }}.service" state: absent with_items: @@ -693,29 +749,34 @@ - rbd-target-gw - rbd-target-api tasks: - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler # NOTE: changed from file module to raw find command for performance reasons # The file module has to run checks on current ownership of all directories and files. 
This is unnecessary # as in this case we know we want all owned by ceph user - - name: set proper ownership on ceph directories - command: "find /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +" changed_when: false - - import_role: + - name: Import ceph-container-engine role + ansible.builtin.import_role: name: ceph-container-engine - - import_role: + - name: Import ceph-container-common role + ansible.builtin.import_role: name: ceph-container-common - - import_role: + - name: Import ceph-iscsi-gw role + ansible.builtin.import_role: name: ceph-iscsi-gw -- name: switching from non-containerized to containerized ceph-crash +- name: Switching from non-containerized to containerized ceph-crash hosts: - "{{ mon_group_name | default('mons') }}" @@ -729,26 +790,30 @@ containerized_deployment: true become: true tasks: - - name: stop non-containerized ceph-crash - service: + - name: Stop non-containerized ceph-crash + ansible.builtin.service: name: ceph-crash state: stopped - enabled: no + enabled: false - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml - - import_role: + - name: Import ceph-handler role + ansible.builtin.import_role: name: ceph-handler - - import_role: + - name: Import ceph-crash role + ansible.builtin.import_role: name: ceph-crash -- name: final task +- name: Final task hosts: - "{{ mon_group_name|default('mons') }}" - "{{ mgr_group_name|default('mgrs') }}" @@ -759,11 +824,12 @@ containerized_deployment: true become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults # NOTE: changed from file module to raw find command for performance reasons # The file module has to run checks on current ownership of all directories and files. This is unnecessary # as in this case we know we want all owned by ceph user - - name: set proper ownership on ceph directories - command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" + - name: Set proper ownership on ceph directories + ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +" changed_when: false diff --git a/infrastructure-playbooks/take-over-existing-cluster.yml b/infrastructure-playbooks/take-over-existing-cluster.yml index 9c913a15e5..2b230a41fd 100644 --- a/infrastructure-playbooks/take-over-existing-cluster.yml +++ b/infrastructure-playbooks/take-over-existing-cluster.yml @@ -11,45 +11,51 @@ # 4. Run the playbook called: `take-over-existing-cluster.yml` like this `ansible-playbook take-over-existing-cluster.yml`. # 5. Eventually run Ceph Ansible to validate everything by doing: `ansible-playbook site.yml`. 
-- hosts: mons - become: True +- name: Fetch keys + hosts: mons + become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults - - import_role: + + - name: Import ceph-fetch-keys role + ansible.builtin.import_role: name: ceph-fetch-keys -- hosts: - - mons - - osds - - mdss - - rgws - - nfss - - rbdmirrors - - clients - - mgrs - - iscsi-gw +- name: Take over existing cluster + hosts: + - mons + - osds + - mdss + - rgws + - nfss + - rbdmirrors + - clients + - mgrs + - iscsi-gw become: true tasks: - - import_role: + - name: Import ceph-defaults role + ansible.builtin.import_role: name: ceph-defaults post_tasks: - - name: get the name of the existing ceph cluster - shell: | + - name: Get the name of the existing ceph cluster + ansible.builtin.shell: | set -o pipefail; basename $(grep --exclude '*.bak' -R fsid /etc/ceph/ | egrep -o '^[^.]*' | head -n 1) changed_when: false register: cluster_name - - name: "stat {{ cluster_name.stdout }}.conf" - stat: + - name: Run stat module on Ceph configuration file + ansible.builtin.stat: path: "/etc/ceph/{{ cluster_name.stdout }}.conf" register: ceph_conf_stat # Creates a backup of original ceph conf file in 'cluster_name-YYYYMMDDTHHMMSS.conf.bak' format - - name: "make a backup of original {{ cluster_name.stdout }}.conf" - copy: + - name: Make a backup of original Ceph configuration file + ansible.builtin.copy: src: "/etc/ceph/{{ cluster_name.stdout }}.conf" dest: "/etc/ceph/{{ cluster_name.stdout }}-{{ ansible_date_time.iso8601_basic_short }}.conf.bak" remote_src: true @@ -57,7 +63,7 @@ group: "{{ ceph_conf_stat.stat.gr_name }}" mode: "{{ ceph_conf_stat.stat.mode }}" - - name: generate ceph configuration file + - name: Generate ceph configuration file openstack.config_template.config_template: src: "roles/ceph-config/templates/ceph.conf.j2" dest: "/etc/ceph/{{ cluster_name.stdout }}.conf" diff --git a/infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml b/infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml index 3d8b8afe71..d7bf4e34c2 100644 --- a/infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml +++ b/infrastructure-playbooks/untested-by-ci/cluster-maintenance.yml @@ -8,16 +8,16 @@ # the operation won't last for too long. 
- hosts: - gather_facts: False + gather_facts: false tasks: - name: Set the noout flag - command: ceph osd set noout + ansible.builtin.command: ceph osd set noout delegate_to: - name: Turn off the server - command: poweroff + ansible.builtin.command: poweroff - name: Wait for the server to go down local_action: @@ -35,5 +35,5 @@ timeout: 3600 - name: Unset the noout flag - command: ceph osd unset noout + ansible.builtin.command: ceph osd unset noout delegate_to: diff --git a/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml b/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml index e7f9485b9b..e43e8eb408 100644 --- a/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml +++ b/infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml @@ -10,7 +10,7 @@ - hosts: mons serial: 1 - sudo: True + sudo: true vars: backup_dir: /tmp/ @@ -18,13 +18,13 @@ tasks: - name: Check if the node has be migrated already - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed register: migration_completed failed_when: false - name: Check for failed run - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar register: mon_archive_leftover @@ -32,36 +32,36 @@ when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True - name: Compress the store as much as possible - command: ceph tell mon.{{ ansible_facts['hostname'] }} compact + ansible.builtin.command: ceph tell mon.{{ ansible_facts['hostname'] }} compact when: migration_completed.stat.exists == False - name: Check if sysvinit - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit register: monsysvinit - changed_when: False + changed_when: false - name: Check if upstart - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart register: monupstart - changed_when: False + changed_when: false - name: Check if init does what it is supposed to do (Sysvinit) - shell: > + ansible.builtin.shell: > ps faux|grep -sq [c]eph-mon && service ceph status mon >> /dev/null register: ceph_status_sysvinit - changed_when: False + changed_when: false # can't complete the condition since the previous taks never ran... - fail: msg="Something is terribly wrong here, sysvinit is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!" when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True - name: Check if init does what it is supposed to do (upstart) - shell: > + ansible.builtin.shell: > ps faux|grep -sq [c]eph-mon && status ceph-mon-all >> /dev/null register: ceph_status_upstart - changed_when: False + changed_when: false - fail: msg="Something is terribly wrong here, upstart is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!" when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True @@ -124,7 +124,7 @@ # NOTE (leseb): should we convert upstart to sysvinit here already? - name: Archive monitor stores - shell: > + ansible.builtin.shell: > tar -cpvzf - --one-file-system . 
/etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar chdir=/var/lib/ceph/ creates={{ ansible_facts['hostname'] }}.tar @@ -138,7 +138,7 @@ when: migration_completed.stat.exists == False - name: Reboot the server - command: reboot + ansible.builtin.command: reboot when: migration_completed.stat.exists == False - name: Wait for the server to come up @@ -154,16 +154,16 @@ when: migration_completed.stat.exists == False - name: Check if sysvinit - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit register: monsysvinit - changed_when: False + changed_when: false - name: Check if upstart - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart register: monupstart - changed_when: False + changed_when: false - name: Make sure the monitor is stopped (Upstart) service: > @@ -190,13 +190,13 @@ when: migration_completed.stat.exists == False - name: Copy keys and configs - shell: > + ansible.builtin.shell: > cp etc/ceph/* /etc/ceph/ chdir=/var/lib/ceph/ when: migration_completed.stat.exists == False - name: Configure RHEL7 for sysvinit - shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \; + ansible.builtin.shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \; when: migration_completed.stat.exists == False # NOTE (leseb): at this point the upstart and sysvinit checks are not necessary @@ -217,7 +217,7 @@ when: migration_completed.stat.exists == False - name: Waiting for the monitor to join the quorum... - shell: > + ansible.builtin.shell: > ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }} register: result until: result.rc == 0 @@ -238,20 +238,20 @@ - hosts: osds serial: 1 - sudo: True + sudo: true vars: backup_dir: /tmp/ tasks: - name: Check if the node has be migrated already - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/migration_completed register: migration_completed failed_when: false - name: Check for failed run - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar register: osd_archive_leftover @@ -259,44 +259,44 @@ when: migration_completed.stat.exists == False and osd_archive_leftover.stat.exists == True - name: Check if init does what it is supposed to do (Sysvinit) - shell: > + ansible.builtin.shell: > ps faux|grep -sq [c]eph-osd && service ceph status osd >> /dev/null register: ceph_status_sysvinit - changed_when: False + changed_when: false # can't complete the condition since the previous taks never ran... - fail: msg="Something is terribly wrong here, sysvinit is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!" when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True - name: Check if init does what it is supposed to do (upstart) - shell: > + ansible.builtin.shell: > ps faux|grep -sq [c]eph-osd && initctl list|egrep -sq "ceph-osd \(ceph/.\) start/running, process [0-9][0-9][0-9][0-9]" register: ceph_status_upstart - changed_when: False + changed_when: false - fail: msg="Something is terribly wrong here, upstart is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!" 
when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True - name: Set the noout flag - command: ceph osd set noout + ansible.builtin.command: ceph osd set noout delegate_to: "{{ item }}" with_items: "{{ groups[mon_group_name][0] }}" when: migration_completed.stat.exists == False - name: Check if sysvinit - shell: stat /var/lib/ceph/osd/ceph-*/sysvinit + ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/sysvinit register: osdsysvinit failed_when: false - changed_when: False + changed_when: false - name: Check if upstart - shell: stat /var/lib/ceph/osd/ceph-*/upstart + ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/upstart register: osdupstart failed_when: false - changed_when: False + changed_when: false - name: Archive ceph configs - shell: > + ansible.builtin.shell: > tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar chdir=/var/lib/ceph/ creates={{ ansible_facts['hostname'] }}.tar @@ -321,7 +321,7 @@ when: migration_completed.stat.exists == False - name: Collect OSD ports - shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq + ansible.builtin.shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq register: osd_ports when: migration_completed.stat.exists == False @@ -349,11 +349,11 @@ when: migration_completed.stat.exists == False - name: Configure RHEL with sysvinit - shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \; + ansible.builtin.shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \; when: migration_completed.stat.exists == False - name: Reboot the server - command: reboot + ansible.builtin.command: reboot when: migration_completed.stat.exists == False - name: Wait for the server to come up @@ -379,7 +379,7 @@ when: migration_completed.stat.exists == False - name: Copy keys and configs - shell: > + ansible.builtin.shell: > cp etc/ceph/* /etc/ceph/ chdir=/var/lib/ceph/ when: migration_completed.stat.exists == False @@ -405,7 +405,7 @@ # - "{{ osd_ports.stdout_lines }}" - name: Waiting for clean PGs... 
- shell: > + ansible.builtin.shell: > test "[""$(ceph -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(ceph -s -f json | python -c 'import sys, json; print([ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"])')" register: result until: result.rc == 0 @@ -425,27 +425,27 @@ when: migration_completed.stat.exists == False - name: Unset the noout flag - command: ceph osd unset noout + ansible.builtin.command: ceph osd unset noout delegate_to: "{{ item }}" with_items: "{{ groups[mon_group_name][0] }}" when: migration_completed.stat.exists == False - hosts: rgws serial: 1 - sudo: True + sudo: true vars: backup_dir: /tmp/ tasks: - name: Check if the node has be migrated already - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/radosgw/migration_completed register: migration_completed failed_when: false - name: Check for failed run - stat: > + ansible.builtin.stat: > path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar register: rgw_archive_leftover @@ -453,7 +453,7 @@ when: migration_completed.stat.exists == False and rgw_archive_leftover.stat.exists == True - name: Archive rados gateway configs - shell: > + ansible.builtin.shell: > tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar chdir=/var/lib/ceph/ creates={{ ansible_facts['hostname'] }}.tar @@ -494,7 +494,7 @@ when: migration_completed.stat.exists == False - name: Reboot the server - command: reboot + ansible.builtin.command: reboot when: migration_completed.stat.exists == False - name: Wait for the server to come up @@ -520,7 +520,7 @@ when: migration_completed.stat.exists == False - name: Copy keys and configs - shell: > + ansible.builtin.shell: > {{ item }} chdir=/var/lib/ceph/ with_items: cp etc/ceph/* /etc/ceph/ diff --git a/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml b/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml index 29f40433c6..3892991f62 100644 --- a/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml +++ b/infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml @@ -31,7 +31,7 @@ tasks: - - name: load a variable file for devices partition + - name: Load a variable file for devices partition include_vars: "{{ item }}" with_first_found: - files: @@ -39,24 +39,24 @@ - "host_vars/default.yml" skip: true - - name: exit playbook, if devices not defined - fail: + - name: Exit playbook, if devices not defined + ansible.builtin.fail: msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml" when: devices is not defined - - name: install sgdisk(gdisk) - package: + - name: Install sgdisk(gdisk) + ansible.builtin.package: name: gdisk state: present register: result until: result is succeeded - - name: erase all previous partitions(dangerous!!!) - shell: sgdisk --zap-all -- /dev/{{item.device_name}} + - name: Erase all previous partitions(dangerous!!!) 
+ ansible.builtin.shell: sgdisk --zap-all -- /dev/{{item.device_name}} with_items: "{{ devices }}" - - name: make osd partitions - shell: > + - name: Make osd partitions + ansible.builtin.shell: > sgdisk --new={{item.1.index}}:0:+{{item.1.size}} "--change-name={{item.1.index}}:ceph {{item.1.type}}" "--typecode={{item.1.index}}:{% if item.1.type=='data' %}{{data_typecode}}{% else %}{{journal_typecode}}{% endif %}" --mbrtogpt -- /dev/{{item.0.device_name}} @@ -74,8 +74,8 @@ group: 64045 when: ansible_facts['os_family'] == "Debian" - - name: change partitions ownership - file: + - name: Change partitions ownership + ansible.builtin.file: path: "/dev/{{item.0.device_name}}{{item.1.index}}" owner: "{{ owner | default('root')}}" group: "{{ group | default('disk')}}" @@ -85,8 +85,8 @@ when: item.0.device_name | match('/dev/([hsv]d[a-z]{1,2}){1,2}$') - - name: change partitions ownership - file: + - name: Change partitions ownership + ansible.builtin.file: path: "/dev/{{item.0.device_name}}p{{item.1.index}}" owner: "{{ owner | default('root')}}" group: "{{ group | default('disk')}}" diff --git a/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml b/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml index dc7766c958..a43a0e64fe 100644 --- a/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml +++ b/infrastructure-playbooks/untested-by-ci/migrate-journal-to-ssd.yml @@ -37,69 +37,69 @@ serial: 1 tasks: - - name: get osd(s) if directory stat - stat: + - name: Get osd(s) if directory stat + ansible.builtin.stat: path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" register: osds_dir_stat with_subelements: - "{{ osds_journal_devices }}" - partitions - - name: exit playbook osd(s) is not on this host - fail: + - name: Exit playbook osd(s) is not on this host + ansible.builtin.fail: msg: exit playbook osd(s) is not on this host with_items: osds_dir_stat.results when: osds_dir_stat is defined and item.stat.exists == false - - name: install sgdisk(gdisk) - package: + - name: Install sgdisk(gdisk) + ansible.builtin.package: name: gdisk state: present register: result until: result is succeeded when: osds_journal_devices is defined - - name: generate uuid for osds journal - command: uuidgen + - name: Generate uuid for osds journal + ansible.builtin.command: uuidgen register: osds with_subelements: - "{{ osds_journal_devices }}" - partitions - - name: make osd partitions on ssd - shell: > + - name: Make osd partitions on ssd + ansible.builtin.shell: > sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal" --typecode={{ item.item[1].index }}:{{ journal_typecode }} --partition-guid={{ item.item[1].index }}:{{ item.stdout }} --mbrtogpt -- {{ item.item[0].device_name }} with_items: "{{ osds.results }}" - - name: stop osd(s) service - service: + - name: Stop osd(s) service + ansible.builtin.service: name: "ceph-osd@{{ item.item[1].osd_id }}" state: stopped with_items: "{{ osds.results }}" - - name: flush osd(s) journal - command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }} + - name: Flush osd(s) journal + ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }} with_items: "{{ osds.results }}" when: osds_journal_devices is defined - - name: update osd(s) journal soft link - command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal + - name: Update 
osd(s) journal soft link + ansible.builtin.command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal with_items: "{{ osds.results }}" - - name: update osd(s) journal uuid - command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid + - name: Update osd(s) journal uuid + ansible.builtin.command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid with_items: "{{ osds.results }}" - - name: initialize osd(s) new journal - command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }} + - name: Initialize osd(s) new journal + ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }} with_items: "{{ osds.results }}" - - name: start osd(s) service - service: + - name: Start osd(s) service + ansible.builtin.service: name: "ceph-osd@{{ item.item[1].osd_id }}" state: started with_items: "{{ osds.results }}" diff --git a/infrastructure-playbooks/untested-by-ci/purge-multisite.yml b/infrastructure-playbooks/untested-by-ci/purge-multisite.yml index 37608ea090..d4840a6114 100644 --- a/infrastructure-playbooks/untested-by-ci/purge-multisite.yml +++ b/infrastructure-playbooks/untested-by-ci/purge-multisite.yml @@ -1,11 +1,11 @@ --- # Nukes a multisite config - hosts: rgws - become: True + become: true tasks: - include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml handlers: # Ansible 2.1.0 bug will ignore included handlers without this - - name: import_tasks roles/ceph-rgw/handlers/main.yml + - name: Import_tasks roles/ceph-rgw/handlers/main.yml import_tasks: roles/ceph-rgw/handlers/main.yml diff --git a/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml b/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml index 592556b057..8e5f1dfc9d 100644 --- a/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml +++ b/infrastructure-playbooks/untested-by-ci/recover-osds-after-ssd-journal-failure.yml @@ -40,11 +40,11 @@ # automation scripts to avoid interactive prompt. 
- hosts: localhost - gather_facts: no + gather_facts: false vars_prompt: - - name: target_host + - name: target_host # noqa: name[casing] prompt: please enter the target hostname which to recover osds after ssd journal failure - private: no + private: false tasks: - add_host: name: "{{ target_host }}" @@ -59,16 +59,16 @@ - fail: msg="please define dev_ssds variable" when: dev_ssds|length <= 0 - - name: get osd(s) if directory stat - stat: + - name: Get osd(s) if directory stat + ansible.builtin.stat: path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" register: osds_dir_stat with_subelements: - "{{ dev_ssds }}" - partitions - - name: exit playbook osd(s) is not on this host - fail: + - name: Exit playbook osd(s) is not on this host + ansible.builtin.fail: msg: exit playbook osds is not no this host with_items: osds_dir_stat.results @@ -76,40 +76,40 @@ - osds_dir_stat is defined | bool - item.stat.exists == false - - name: install sgdisk(gdisk) - package: + - name: Install sgdisk(gdisk) + ansible.builtin.package: name: gdisk state: present register: result until: result is succeeded - - name: get osd(s) journal uuid - command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" + - name: Get osd(s) journal uuid + ansible.builtin.command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid" register: osds_uuid with_subelements: - "{{ dev_ssds }}" - partitions - - name: make partitions on new ssd - shell: > + - name: Make partitions on new ssd + ansible.builtin.shell: > sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal" --typecode={{ item.item[1].index }}:{{ journal_typecode }} --partition-guid={{ item.item[1].index }}:{{ item.stdout }} --mbrtogpt -- {{ item.item[0].device_name }} with_items: "{{ osds_uuid.results }}" - - name: stop osd(s) service - service: + - name: Stop osd(s) service + ansible.builtin.service: name: "ceph-osd@{{ item.item[1].osd_id }}" state: stopped with_items: "{{ osds_uuid.results }}" - - name: reinitialize osd(s) journal in new ssd - command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }} + - name: Reinitialize osd(s) journal in new ssd + ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }} with_items: "{{ osds_uuid.results }}" - - name: start osd(s) service - service: + - name: Start osd(s) service + ansible.builtin.service: name: "ceph-osd@{{ item.item[1].osd_id }}" state: started with_items: "{{ osds_uuid.results }}" diff --git a/infrastructure-playbooks/untested-by-ci/replace-osd.yml b/infrastructure-playbooks/untested-by-ci/replace-osd.yml index 11f3869148..c9f25f7ed8 100644 --- a/infrastructure-playbooks/untested-by-ci/replace-osd.yml +++ b/infrastructure-playbooks/untested-by-ci/replace-osd.yml @@ -15,38 +15,38 @@ # Overrides the prompt using -e option. Can be used in # automation scripts to avoid interactive prompt.
-- name: gather facts and check the init system +- name: Gather facts and check the init system hosts: - "{{ mon_group_name|default('mons') }}" - "{{ osd_group_name|default('osds') }}" - become: True + become: true tasks: - - debug: msg="gather facts on all Ceph hosts for following reference" + - ansible.builtin.debug: msg="gather facts on all Ceph hosts for following reference" -- name: confirm whether user really meant to replace osd(s) +- name: Confirm whether user really meant to replace osd(s) hosts: localhost become: true vars_prompt: - - name: ireallymeanit + - name: ireallymeanit # noqa: name[casing] prompt: Are you sure you want to replace the osd(s)? default: 'no' - private: no + private: false vars: mon_group_name: mons osd_group_name: osds pre_tasks: - - name: exit playbook, if user did not mean to replace the osd(s) - fail: + - name: Exit playbook, if user did not mean to replace the osd(s) + ansible.builtin.fail: msg: "Exiting replace-osd playbook, no osd(s) was/were replaced.. To replace the osd(s), either say 'yes' on the prompt or or use `-e ireallymeanit=yes` on the command line when invoking the playbook" when: ireallymeanit != 'yes' - - name: exit playbook, if no osd(s) was/were given - fail: + - name: Exit playbook, if no osd(s) was/were given + ansible.builtin.fail: msg: "osd_to_replace must be declared Exiting replace-osd playbook, no OSD(s) was/were replaced. On the command line when invoking the playbook, you can use @@ -54,36 +54,36 @@ when: osd_to_replace is not defined tasks: - - import_role: + - ansible.builtin.import_role: name: ceph-defaults post_tasks: - - name: set_fact container_exec_cmd build docker exec command (containerized) - set_fact: + - name: Set_fact container_exec_cmd build docker exec command (containerized) + ansible.builtin.set_fact: container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" when: containerized_deployment | bool - - name: exit playbook, if can not connect to the cluster - command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" + - name: Exit playbook, if can not connect to the cluster + ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health" register: ceph_health until: ceph_health.stdout.find("HEALTH") > -1 delegate_to: "{{ groups[mon_group_name][0] }}" retries: 5 delay: 2 - - name: find the host(s) where the osd(s) is/are running on - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}" + - name: Find the host(s) where the osd(s) is/are running on + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}" with_items: "{{ osd_to_replace.split(',') }}" delegate_to: "{{ groups[mon_group_name][0] }}" register: find_osd_hosts - - name: set_fact osd_hosts - set_fact: + - name: Set_fact osd_hosts + ansible.builtin.set_fact: osd_hosts: "{{ osd_hosts | default([]) + [ (item.stdout | from_json).crush_location.host ] }}" with_items: "{{ find_osd_hosts.results }}" - - name: check if ceph admin key exists on the osd nodes - stat: + - name: Check if ceph admin key exists on the osd nodes + ansible.builtin.stat: path: "/etc/ceph/{{ cluster }}.client.admin.keyring" register: ceph_admin_key with_items: "{{ osd_hosts }}" @@ -91,8 +91,8 @@ failed_when: false when: not containerized_deployment | bool - - name: fail when admin key is not present - fail: +
ansible.builtin.fail: msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done." with_items: "{{ ceph_admin_key.results }}" when: @@ -100,8 +100,8 @@ - item.stat.exists == false # NOTE(leseb): using '>' is the only way I could have the command working - - name: find osd device based on the id - shell: > + - name: Find osd device based on the id + ansible.builtin.shell: > docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list | awk -v pattern=osd.{{ item.1 }} '$0 ~ pattern {print $1}' @@ -112,8 +112,8 @@ delegate_to: "{{ item.0 }}" when: containerized_deployment | bool - - name: zapping osd(s) - container - shell: > + - name: Zapping osd(s) - container + ansible.builtin.shell: > docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} zap {{ item.1 }} @@ -124,8 +124,8 @@ delegate_to: "{{ item.0 }}" when: containerized_deployment | bool - - name: zapping osd(s) - non container - command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }} + - name: Zapping osd(s) - non container + ansible.builtin.command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }} run_once: true with_together: - "{{ osd_hosts }}" @@ -133,8 +133,8 @@ delegate_to: "{{ item.0 }}" when: not containerized_deployment | bool - - name: destroying osd(s) - command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap + - name: Destroying osd(s) + ansible.builtin.command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap run_once: true with_together: - "{{ osd_hosts }}" @@ -142,8 +142,8 @@ delegate_to: "{{ item.0 }}" when: not containerized_deployment | bool - - name: replace osd(s) - prepare - non container - command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen) + - name: Replace osd(s) - prepare - non container + ansible.builtin.command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen) run_once: true delegate_to: "{{ item.0 }}" with_together: @@ -151,8 +151,8 @@ - "{{ osd_to_replace_disks.results }}" - "{{ osd_to_replace.split(',') }}" - - name: replace osd(s) - prepare - container - shell: > + - name: Replace osd(s) - prepare - container + ansible.builtin.shell: > docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} prepare {{ item.1 }} @@ -162,16 +162,16 @@ - "{{ osd_hosts }}" - "{{ osd_to_replace_disks.results }}" - - name: replace osd(s) - activate - non container - command: ceph-disk activate {{ item.1 }}1 + - name: Replace osd(s) - activate - non container + ansible.builtin.command: ceph-disk activate {{ item.1 }}1 run_once: true delegate_to: "{{ item.0 }}" with_together: - "{{ osd_hosts }}" - "{{ osd_to_replace_disks.results }}" - - name: replace osd(s) - activate - container - shell: > + - name: Replace osd(s) - activate - container + ansible.builtin.shell: > docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} activate {{ item.1 }}1 @@ -181,10 +181,10 @@ - "{{ osd_hosts }}" - "{{ osd_to_replace_disks.results }}" - - name: show ceph health - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" + - name: Show ceph health + 
ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s" delegate_to: "{{ groups[mon_group_name][0] }}" - - name: show ceph osd tree - command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree" + - name: Show ceph osd tree + ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree" delegate_to: "{{ groups[mon_group_name][0] }}" diff --git a/roles/ceph-client/defaults/main.yml b/roles/ceph-client/defaults/main.yml index 1f70f1d385..887f5127dd 100644 --- a/roles/ceph-client/defaults/main.yml +++ b/roles/ceph-client/defaults/main.yml @@ -37,5 +37,5 @@ pools: # - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ... keys: - - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" } - - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" } + - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" } + - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" } diff --git a/roles/ceph-client/meta/main.yml b/roles/ceph-client/meta/main.yml index e637abf1ce..465aa110e9 100644 --- a/roles/ceph-client/meta/main.yml +++ b/roles/ceph-client/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Installs A Ceph Client license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-client/tasks/create_users_keys.yml b/roles/ceph-client/tasks/create_users_keys.yml index b85fd59b6a..90c006eadf 100644 --- a/roles/ceph-client/tasks/create_users_keys.yml +++ b/roles/ceph-client/tasks/create_users_keys.yml @@ -1,13 +1,13 @@ --- -- name: set_fact delegated_node - set_fact: +- name: Set_fact delegated_node + ansible.builtin.set_fact: delegated_node: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else inventory_hostname }}" -- name: set_fact admin_key_presence - set_fact: +- name: Set_fact admin_key_presence + ansible.builtin.set_fact: admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}" -- name: create cephx key(s) +- name: Create cephx key(s) ceph_key: name: "{{ item.name }}" caps: "{{ item.caps }}" @@ -30,8 +30,8 @@ - inventory_hostname == groups.get('_filtered_clients') | first no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: slurp client cephx key(s) - slurp: +- name: Slurp client cephx key(s) + ansible.builtin.slurp: src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring" with_items: "{{ keys }}" register: slurp_client_keys @@ -42,16 +42,17 @@ - inventory_hostname == groups.get('_filtered_clients') | first no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: pool related tasks +- name: Pool related tasks when: - admin_key_presence | bool - inventory_hostname == groups.get('_filtered_clients', []) | first block: - - import_role: + - name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: get_def_crush_rule_name.yml - - name: create ceph pool(s) + - name: Create ceph 
pool(s) ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -72,8 +73,8 @@ changed_when: false delegate_to: "{{ delegated_node }}" -- name: get client cephx keys - copy: +- name: Get client cephx keys + ansible.builtin.copy: dest: "{{ item.source }}" content: "{{ item.content | b64decode }}" mode: "{{ item.item.get('mode', '0600') }}" @@ -82,4 +83,3 @@ with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}" when: not item.get('skipped', False) no_log: "{{ no_log_on_ceph_key_tasks }}" - diff --git a/roles/ceph-client/tasks/main.yml b/roles/ceph-client/tasks/main.yml index 40ab958911..64e773ed88 100644 --- a/roles/ceph-client/tasks/main.yml +++ b/roles/ceph-client/tasks/main.yml @@ -1,10 +1,10 @@ --- -- name: include pre_requisite.yml - include_tasks: pre_requisite.yml +- name: Include pre_requisite.yml + ansible.builtin.include_tasks: pre_requisite.yml when: groups.get(mon_group_name, []) | length > 0 -- name: include create_users_keys.yml - include_tasks: create_users_keys.yml +- name: Include create_users_keys.yml + ansible.builtin.include_tasks: create_users_keys.yml when: - user_config | bool - not rolling_update | default(False) | bool diff --git a/roles/ceph-client/tasks/pre_requisite.yml b/roles/ceph-client/tasks/pre_requisite.yml index 92cce4c0b2..2adc652322 100644 --- a/roles/ceph-client/tasks/pre_requisite.yml +++ b/roles/ceph-client/tasks/pre_requisite.yml @@ -1,7 +1,10 @@ --- -- name: copy ceph admin keyring +- name: Copy ceph admin keyring + when: + - cephx | bool + - copy_admin_key | bool block: - - name: get keys from monitors + - name: Get keys from monitors ceph_key: name: client.admin cluster: "{{ cluster }}" @@ -15,14 +18,11 @@ run_once: true no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: copy ceph key(s) if needed - copy: + - name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "/etc/ceph/{{ cluster }}.client.admin.keyring" content: "{{ _admin_key.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" mode: "{{ ceph_keyring_permissions }}" no_log: "{{ no_log_on_ceph_key_tasks }}" - when: - - cephx | bool - - copy_admin_key | bool diff --git a/roles/ceph-common/meta/main.yml b/roles/ceph-common/meta/main.yml index 8642d3c572..f30dfb9663 100644 --- a/roles/ceph-common/meta/main.yml +++ b/roles/ceph-common/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Installs Ceph license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-common/tasks/configure_cluster_name.yml b/roles/ceph-common/tasks/configure_cluster_name.yml index 7b7cb42930..150b8030aa 100644 --- a/roles/ceph-common/tasks/configure_cluster_name.yml +++ b/roles/ceph-common/tasks/configure_cluster_name.yml @@ -1,11 +1,12 @@ --- -- name: configure cluster name - lineinfile: +- name: Configure cluster name + ansible.builtin.lineinfile: dest: /etc/sysconfig/ceph insertafter: EOF - create: yes + create: true line: "CLUSTER={{ cluster }}" regexp: "^CLUSTER=" + mode: "0644" when: ansible_facts['os_family'] in ["RedHat", "Suse"] # NOTE(leseb): we are performing the following check @@ -18,32 +19,34 @@ # - Jewel from latest Canonical 16.04 distro # - All previous versions from Canonical # - Infernalis from ceph.com -- name: debian based systems - configure cluster name +- name: Debian based systems - 
configure cluster name when: ansible_facts['os_family'] == "Debian" block: - - name: check /etc/default/ceph exist - stat: + - name: Check /etc/default/ceph exist + ansible.builtin.stat: path: /etc/default/ceph register: etc_default_ceph - check_mode: no + check_mode: false - - name: configure cluster name + - name: Configure cluster name when: etc_default_ceph.stat.exists block: - - name: when /etc/default/ceph is not dir - lineinfile: + - name: When /etc/default/ceph is not dir + ansible.builtin.lineinfile: dest: /etc/default/ceph insertafter: EOF - create: yes + create: true regexp: "^CLUSTER=" line: "CLUSTER={{ cluster }}" + mode: "0644" when: not etc_default_ceph.stat.isdir - - name: when /etc/default/ceph is dir - lineinfile: + - name: When /etc/default/ceph is dir + ansible.builtin.lineinfile: dest: /etc/default/ceph/ceph insertafter: EOF - create: yes + create: true regexp: "^CLUSTER=" line: "CLUSTER={{ cluster }}" + mode: "0644" when: etc_default_ceph.stat.isdir diff --git a/roles/ceph-common/tasks/configure_memory_allocator.yml b/roles/ceph-common/tasks/configure_memory_allocator.yml index 2e81b629fb..4db07a0336 100644 --- a/roles/ceph-common/tasks/configure_memory_allocator.yml +++ b/roles/ceph-common/tasks/configure_memory_allocator.yml @@ -1,34 +1,36 @@ --- -- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian - lineinfile: +- name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian + ansible.builtin.lineinfile: dest: "{{ etc_default_ceph.stat.isdir | ternary('/etc/default/ceph/ceph', '/etc/default/ceph') }}" insertafter: EOF - create: yes + create: true regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=" line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}" + mode: "0644" when: - ansible_facts['os_family'] == 'Debian' - etc_default_ceph.stat.exists notify: - - restart ceph mons - - restart ceph mgrs - - restart ceph osds - - restart ceph mdss - - restart ceph rgws - - restart ceph rbdmirrors + - Restart ceph mons + - Restart ceph mgrs + - Restart ceph osds + - Restart ceph mdss + - Restart ceph rgws + - Restart ceph rbdmirrors -- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat - lineinfile: +- name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat + ansible.builtin.lineinfile: dest: "/etc/sysconfig/ceph" insertafter: EOF - create: yes + create: true regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES=" line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}" + mode: "0644" when: ansible_facts['os_family'] == 'RedHat' notify: - - restart ceph mons - - restart ceph mgrs - - restart ceph osds - - restart ceph mdss - - restart ceph rgws - - restart ceph rbdmirrors + - Restart ceph mons + - Restart ceph mgrs + - Restart ceph osds + - Restart ceph mdss + - Restart ceph rgws + - Restart ceph rbdmirrors diff --git a/roles/ceph-common/tasks/configure_repository.yml b/roles/ceph-common/tasks/configure_repository.yml index 711c62908a..35583760a8 100644 --- a/roles/ceph-common/tasks/configure_repository.yml +++ b/roles/ceph-common/tasks/configure_repository.yml @@ -1,32 +1,32 @@ --- -- name: config repository for Red Hat based OS +- name: Config repository for Red Hat based OS when: ansible_facts['os_family'] == 'RedHat' block: - - name: include installs/configure_redhat_repository_installation.yml - include_tasks: installs/configure_redhat_repository_installation.yml + - name: Include installs/configure_redhat_repository_installation.yml + ansible.builtin.include_tasks: 
installs/configure_redhat_repository_installation.yml when: ceph_origin == 'repository' - - name: include installs/configure_redhat_local_installation.yml - include_tasks: installs/configure_redhat_local_installation.yml + - name: Include installs/configure_redhat_local_installation.yml + ansible.builtin.include_tasks: installs/configure_redhat_local_installation.yml when: ceph_origin == 'local' -- name: config repository for Debian based OS +- name: Config repository for Debian based OS when: ansible_facts['os_family'] == 'Debian' + tags: package-install block: - - name: include installs/configure_debian_repository_installation.yml - include_tasks: installs/configure_debian_repository_installation.yml + - name: Include installs/configure_debian_repository_installation.yml + ansible.builtin.include_tasks: installs/configure_debian_repository_installation.yml when: ceph_origin == 'repository' - - name: update apt cache if cache_valid_time has expired - apt: - update_cache: yes + - name: Update apt cache if cache_valid_time has expired + ansible.builtin.apt: + update_cache: true cache_valid_time: 3600 register: result until: result is succeeded - tags: package-install -- name: include installs/configure_suse_repository_installation.yml - include_tasks: installs/configure_suse_repository_installation.yml +- name: Include installs/configure_suse_repository_installation.yml + ansible.builtin.include_tasks: installs/configure_suse_repository_installation.yml when: - ansible_facts['os_family'] == 'Suse' - ceph_origin == 'repository' diff --git a/roles/ceph-common/tasks/create_rbd_client_dir.yml b/roles/ceph-common/tasks/create_rbd_client_dir.yml index 5560acdcba..271452fff4 100644 --- a/roles/ceph-common/tasks/create_rbd_client_dir.yml +++ b/roles/ceph-common/tasks/create_rbd_client_dir.yml @@ -1,6 +1,6 @@ --- -- name: create rbd client directory - file: +- name: Create rbd client directory + ansible.builtin.file: path: "{{ item }}" state: directory owner: "{{ rbd_client_directory_owner }}" diff --git a/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml index dd610f68c5..caedf4d8db 100644 --- a/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_debian_repository_installation.yml @@ -1,16 +1,16 @@ --- -- name: include debian_community_repository.yml - include_tasks: debian_community_repository.yml +- name: Include debian_community_repository.yml + ansible.builtin.include_tasks: debian_community_repository.yml when: ceph_repository == 'community' -- name: include debian_dev_repository.yml - include_tasks: debian_dev_repository.yml +- name: Include debian_dev_repository.yml + ansible.builtin.include_tasks: debian_dev_repository.yml when: ceph_repository == 'dev' -- name: include debian_custom_repository.yml - include_tasks: debian_custom_repository.yml +- name: Include debian_custom_repository.yml + ansible.builtin.include_tasks: debian_custom_repository.yml when: ceph_repository == 'custom' -- name: include debian_uca_repository.yml - include_tasks: debian_uca_repository.yml +- name: Include debian_uca_repository.yml + ansible.builtin.include_tasks: debian_uca_repository.yml when: ceph_repository == 'uca' diff --git a/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml b/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml index 90c8cbfd88..3f2ddd5366 100644 --- 
a/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_redhat_local_installation.yml @@ -1,43 +1,45 @@ --- -- name: make sure /tmp exists - file: +- name: Make sure /tmp exists + ansible.builtin.file: path: /tmp state: directory + mode: "0755" when: use_installer | bool -- name: use mktemp to create name for rundep - tempfile: +- name: Use mktemp to create name for rundep + ansible.builtin.tempfile: path: /tmp prefix: rundep. register: rundep_location when: use_installer | bool -- name: copy rundep - copy: +- name: Copy rundep + ansible.builtin.copy: src: "{{ ansible_dir }}/rundep" dest: "{{ rundep_location.path }}" + mode: preserve when: use_installer | bool -- name: install ceph dependencies - script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}" +- name: Install ceph dependencies + ansible.builtin.script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}" when: use_installer | bool -- name: ensure rsync is installed - package: +- name: Ensure rsync is installed + ansible.builtin.package: name: rsync state: present register: result until: result is succeeded -- name: synchronize ceph install - synchronize: +- name: Synchronize ceph install + ansible.posix.synchronize: src: "{{ ceph_installation_dir }}/" dest: "/" -- name: create user group ceph - group: +- name: Create user group ceph + ansible.builtin.group: name: 'ceph' -- name: create user ceph - user: +- name: Create user ceph + ansible.builtin.user: name: 'ceph' diff --git a/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml index 86080660f2..67f8b6057d 100644 --- a/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml @@ -1,22 +1,22 @@ --- -- name: include redhat_community_repository.yml - include_tasks: redhat_community_repository.yml +- name: Include redhat_community_repository.yml + ansible.builtin.include_tasks: redhat_community_repository.yml when: ceph_repository == 'community' -- name: include redhat_rhcs_repository.yml - include_tasks: redhat_rhcs_repository.yml +- name: Include redhat_rhcs_repository.yml + ansible.builtin.include_tasks: redhat_rhcs_repository.yml when: ceph_repository == 'rhcs' -- name: include redhat_dev_repository.yml - include_tasks: redhat_dev_repository.yml +- name: Include redhat_dev_repository.yml + ansible.builtin.include_tasks: redhat_dev_repository.yml when: ceph_repository == 'dev' -- name: include redhat_custom_repository.yml - include_tasks: redhat_custom_repository.yml +- name: Include redhat_custom_repository.yml + ansible.builtin.include_tasks: redhat_custom_repository.yml when: ceph_repository == 'custom' # Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version -- name: purge yum cache - command: yum clean all #noqa: [303] +- name: Purge yum cache + ansible.builtin.command: yum clean all # noqa: [303] changed_when: false when: ansible_facts['pkg_mgr'] == 'yum' diff --git a/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml b/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml index 689cbd915f..32d9e51dc6 100644 --- a/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml +++ b/roles/ceph-common/tasks/installs/configure_suse_repository_installation.yml @@ -1,4 +1,4 @@ 
--- -- name: include suse_obs_repository.yml - include_tasks: suse_obs_repository.yml +- name: Include suse_obs_repository.yml + ansible.builtin.include_tasks: suse_obs_repository.yml when: ceph_repository == 'obs' diff --git a/roles/ceph-common/tasks/installs/debian_community_repository.yml b/roles/ceph-common/tasks/installs/debian_community_repository.yml index 6832a3f5b1..c334521081 100644 --- a/roles/ceph-common/tasks/installs/debian_community_repository.yml +++ b/roles/ceph-common/tasks/installs/debian_community_repository.yml @@ -1,20 +1,20 @@ --- -- name: install dependencies for apt modules - package: +- name: Install dependencies for apt modules + ansible.builtin.package: name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common'] - update_cache: yes + update_cache: true register: result until: result is succeeded -- name: configure debian ceph community repository stable key - apt_key: - data: "{{ lookup('file', role_path+'/files/cephstable.asc') }}" +- name: Configure debian ceph community repository stable key + ansible.builtin.apt_key: + data: "{{ lookup('file', role_path + '/files/cephstable.asc') }}" state: present register: result until: result is succeeded -- name: configure debian ceph stable community repository - apt_repository: +- name: Configure debian ceph stable community repository + ansible.builtin.apt_repository: repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main" state: present - update_cache: yes + update_cache: true diff --git a/roles/ceph-common/tasks/installs/debian_custom_repository.yml b/roles/ceph-common/tasks/installs/debian_custom_repository.yml index 607ce0896c..2d1fb0746b 100644 --- a/roles/ceph-common/tasks/installs/debian_custom_repository.yml +++ b/roles/ceph-common/tasks/installs/debian_custom_repository.yml @@ -1,14 +1,14 @@ --- -- name: configure debian custom apt key - apt_key: +- name: Configure debian custom apt key + ansible.builtin.apt_key: url: "{{ ceph_custom_key }}" state: present register: result until: result is succeeded when: ceph_custom_key is defined -- name: configure debian custom repository - apt_repository: +- name: Configure debian custom repository + ansible.builtin.apt_repository: repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main" state: present - update_cache: yes + update_cache: true diff --git a/roles/ceph-common/tasks/installs/debian_dev_repository.yml b/roles/ceph-common/tasks/installs/debian_dev_repository.yml index 446049a644..9533fcfe7e 100644 --- a/roles/ceph-common/tasks/installs/debian_dev_repository.yml +++ b/roles/ceph-common/tasks/installs/debian_dev_repository.yml @@ -1,12 +1,12 @@ --- -- name: fetch ceph debian development repository - uri: +- name: Fetch ceph debian development repository + ansible.builtin.uri: url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo?arch={{ ansible_facts['architecture'] }}" - return_content: yes + return_content: true register: ceph_dev_deb_repo -- name: configure ceph debian development repository - apt_repository: +- name: Configure ceph debian development repository + ansible.builtin.apt_repository: repo: "{{ ceph_dev_deb_repo.content }}" state: present - update_cache: yes + update_cache: true diff --git a/roles/ceph-common/tasks/installs/debian_uca_repository.yml 
b/roles/ceph-common/tasks/installs/debian_uca_repository.yml index 7861cfd2df..be1a56204d 100644 --- a/roles/ceph-common/tasks/installs/debian_uca_repository.yml +++ b/roles/ceph-common/tasks/installs/debian_uca_repository.yml @@ -1,12 +1,12 @@ --- -- name: add ubuntu cloud archive key package - package: +- name: Add ubuntu cloud archive key package + ansible.builtin.package: name: ubuntu-cloud-keyring register: result until: result is succeeded -- name: add ubuntu cloud archive repository - apt_repository: +- name: Add ubuntu cloud archive repository + ansible.builtin.apt_repository: repo: "deb {{ ceph_stable_repo_uca }} {{ ceph_stable_release_uca }} main" state: present - update_cache: yes + update_cache: true diff --git a/roles/ceph-common/tasks/installs/install_debian_packages.yml b/roles/ceph-common/tasks/installs/install_debian_packages.yml index 3c6db106f3..edb4a74281 100644 --- a/roles/ceph-common/tasks/installs/install_debian_packages.yml +++ b/roles/ceph-common/tasks/installs/install_debian_packages.yml @@ -1,9 +1,9 @@ --- -- name: install ceph for debian - apt: +- name: Install ceph for debian + ansible.builtin.apt: name: "{{ debian_ceph_pkgs | unique }}" - update_cache: no - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + update_cache: false + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}" register: result until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml index d9b31e9445..29a799cefa 100644 --- a/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml +++ b/roles/ceph-common/tasks/installs/install_debian_rhcs_packages.yml @@ -1,7 +1,7 @@ --- -- name: install red hat storage ceph packages for debian - apt: +- name: Install red hat storage ceph packages for debian + ansible.builtin.apt: pkg: "{{ debian_ceph_pkgs | unique }}" - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/install_on_clear.yml b/roles/ceph-common/tasks/installs/install_on_clear.yml index f640c52842..84619f95cc 100644 --- a/roles/ceph-common/tasks/installs/install_on_clear.yml +++ b/roles/ceph-common/tasks/installs/install_on_clear.yml @@ -1,6 +1,6 @@ --- -- name: install ceph bundle - swupd: +- name: Install ceph bundle + community.general.swupd: name: storage-cluster state: present register: result diff --git a/roles/ceph-common/tasks/installs/install_on_debian.yml b/roles/ceph-common/tasks/installs/install_on_debian.yml index b10d04fafa..c7a7129d03 100644 --- a/roles/ceph-common/tasks/installs/install_on_debian.yml +++ b/roles/ceph-common/tasks/installs/install_on_debian.yml @@ -1,20 +1,20 @@ -- name: install dependencies - apt: +- name: Install dependencies + ansible.builtin.apt: name: "{{ debian_package_dependencies }}" state: present - update_cache: yes + update_cache: true cache_valid_time: 3600 register: result until: result is succeeded -- name: include install_debian_packages.yml - include_tasks: install_debian_packages.yml +- name: Include install_debian_packages.yml 
+ ansible.builtin.include_tasks: install_debian_packages.yml when: - (ceph_origin == 'repository' or ceph_origin == 'distro') - ceph_repository != 'rhcs' -- name: include install_debian_rhcs_packages.yml - include_tasks: install_debian_rhcs_packages.yml +- name: Include install_debian_rhcs_packages.yml + ansible.builtin.include_tasks: install_debian_rhcs_packages.yml when: - (ceph_origin == 'repository' or ceph_origin == 'distro') - ceph_repository == 'rhcs' diff --git a/roles/ceph-common/tasks/installs/install_redhat_packages.yml b/roles/ceph-common/tasks/installs/install_redhat_packages.yml index 7e9f6d62fd..08769de92f 100644 --- a/roles/ceph-common/tasks/installs/install_redhat_packages.yml +++ b/roles/ceph-common/tasks/installs/install_redhat_packages.yml @@ -1,23 +1,23 @@ --- -- name: install redhat dependencies - package: +- name: Install redhat dependencies + ansible.builtin.package: name: "{{ redhat_package_dependencies }}" state: present register: result until: result is succeeded when: ansible_facts['distribution'] == 'RedHat' -- name: install centos dependencies - yum: +- name: Install centos dependencies + ansible.builtin.yum: name: "{{ centos_package_dependencies }}" state: present register: result until: result is succeeded when: ansible_facts['distribution'] == 'CentOS' -- name: install redhat ceph packages - package: +- name: Install redhat ceph packages + ansible.builtin.package: name: "{{ redhat_ceph_pkgs | unique }}" - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/install_suse_packages.yml b/roles/ceph-common/tasks/installs/install_suse_packages.yml index 5c5566faf5..5adb39d347 100644 --- a/roles/ceph-common/tasks/installs/install_suse_packages.yml +++ b/roles/ceph-common/tasks/installs/install_suse_packages.yml @@ -1,14 +1,14 @@ --- -- name: install SUSE/openSUSE dependencies - package: +- name: Install SUSE/openSUSE dependencies + ansible.builtin.package: name: "{{ suse_package_dependencies }}" state: present register: result until: result is succeeded -- name: install SUSE/openSUSE ceph packages - package: +- name: Install SUSE/openSUSE ceph packages + ansible.builtin.package: name: "{{ suse_ceph_pkgs | unique }}" - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded diff --git a/roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml b/roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml index f27d67bb53..a1785072bd 100644 --- a/roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml +++ b/roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml @@ -1,6 +1,6 @@ --- -- name: enable red hat storage tools repository - rhsm_repository: +- name: Enable red hat storage tools repository + community.general.rhsm_repository: name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms" when: - mon_group_name in group_names diff --git a/roles/ceph-common/tasks/installs/redhat_community_repository.yml b/roles/ceph-common/tasks/installs/redhat_community_repository.yml index 4db2737ccd..91adb28373 100644 --- a/roles/ceph-common/tasks/installs/redhat_community_repository.yml +++ b/roles/ceph-common/tasks/installs/redhat_community_repository.yml @@ -1,24 +1,24 @@ --- 
-- name: install yum plugin priorities - package: +- name: Install yum plugin priorities + ansible.builtin.package: name: yum-plugin-priorities register: result until: result is succeeded tags: with_pkg when: ansible_facts['distribution_major_version'] | int == 7 -- name: configure red hat ceph community repository stable key - rpm_key: +- name: Configure red hat ceph community repository stable key + ansible.builtin.rpm_key: key: "{{ ceph_stable_key }}" state: present register: result until: result is succeeded -- name: configure red hat ceph stable community repository - yum_repository: +- name: Configure red hat ceph stable community repository + ansible.builtin.yum_repository: name: ceph_stable description: Ceph Stable $basearch repo - gpgcheck: yes + gpgcheck: true state: present gpgkey: "{{ ceph_stable_key }}" baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch" @@ -27,11 +27,11 @@ register: result until: result is succeeded -- name: configure red hat ceph stable noarch community repository - yum_repository: +- name: Configure red hat ceph stable noarch community repository + ansible.builtin.yum_repository: name: ceph_stable_noarch description: Ceph Stable noarch repo - gpgcheck: yes + gpgcheck: true state: present gpgkey: "{{ ceph_stable_key }}" baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch" diff --git a/roles/ceph-common/tasks/installs/redhat_custom_repository.yml b/roles/ceph-common/tasks/installs/redhat_custom_repository.yml index 5bdefa2c32..c944d3ad3c 100644 --- a/roles/ceph-common/tasks/installs/redhat_custom_repository.yml +++ b/roles/ceph-common/tasks/installs/redhat_custom_repository.yml @@ -1,15 +1,16 @@ --- -- name: configure red hat custom rpm key - rpm_key: +- name: Configure red hat custom rpm key + ansible.builtin.rpm_key: key: "{{ ceph_custom_key }}" state: present register: result until: result is succeeded when: ceph_custom_key is defined -- name: configure red hat custom repository - get_url: +- name: Configure red hat custom repository + ansible.builtin.get_url: url: "{{ ceph_custom_repo }}" dest: /etc/yum.repos.d owner: root group: root + mode: "0644" diff --git a/roles/ceph-common/tasks/installs/redhat_dev_repository.yml b/roles/ceph-common/tasks/installs/redhat_dev_repository.yml index 6d157781d3..541552aac2 100644 --- a/roles/ceph-common/tasks/installs/redhat_dev_repository.yml +++ b/roles/ceph-common/tasks/installs/redhat_dev_repository.yml @@ -1,21 +1,22 @@ --- -- name: fetch ceph red hat development repository - uri: +- name: Fetch ceph red hat development repository + ansible.builtin.uri: # Use the centos repo since we don't currently have a dedicated red hat repo url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/centos/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}" - return_content: yes + return_content: true register: ceph_dev_yum_repo -- name: configure ceph red hat development repository - copy: +- name: Configure ceph red hat development repository + ansible.builtin.copy: content: "{{ ceph_dev_yum_repo.content }}" dest: /etc/yum.repos.d/ceph-dev.repo owner: root group: root - backup: yes + mode: "0644" + backup: true -- name: remove ceph_stable repositories - yum_repository: +- name: Remove ceph_stable repositories + ansible.builtin.yum_repository: name: '{{ item }}' file: ceph_stable state: absent diff --git 
a/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml b/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml index 5763b7386b..4d37014e9c 100644 --- a/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml +++ b/roles/ceph-common/tasks/installs/redhat_rhcs_repository.yml @@ -1,3 +1,3 @@ --- -- name: include prerequisite_rhcs_cdn_install.yml - include_tasks: prerequisite_rhcs_cdn_install.yml +- name: Include prerequisite_rhcs_cdn_install.yml + ansible.builtin.include_tasks: prerequisite_rhcs_cdn_install.yml diff --git a/roles/ceph-common/tasks/installs/suse_obs_repository.yml b/roles/ceph-common/tasks/installs/suse_obs_repository.yml index a59364546c..327f15e9a2 100644 --- a/roles/ceph-common/tasks/installs/suse_obs_repository.yml +++ b/roles/ceph-common/tasks/installs/suse_obs_repository.yml @@ -1,8 +1,8 @@ --- -- name: configure openSUSE ceph OBS repository - zypper_repository: +- name: Configure openSUSE ceph OBS repository + community.general.zypper_repository: name: "OBS:filesystems:ceph:{{ ceph_release }}" state: present repo: "{{ ceph_obs_repo }}" - auto_import_keys: yes - autorefresh: yes + auto_import_keys: true + autorefresh: true diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index a3fa31e332..03b7bd2166 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -1,71 +1,71 @@ --- -- name: include configure_repository.yml - include_tasks: configure_repository.yml +- name: Include configure_repository.yml + ansible.builtin.include_tasks: configure_repository.yml tags: package-configure -- name: include installs/install_redhat_packages.yml - include_tasks: installs/install_redhat_packages.yml +- name: Include installs/install_redhat_packages.yml + ansible.builtin.include_tasks: installs/install_redhat_packages.yml when: - ansible_facts['os_family'] == 'RedHat' - (ceph_origin == 'repository' or ceph_origin == 'distro') tags: package-install -- name: include installs/install_suse_packages.yml - include_tasks: installs/install_suse_packages.yml +- name: Include installs/install_suse_packages.yml + ansible.builtin.include_tasks: installs/install_suse_packages.yml when: ansible_facts['os_family'] == 'Suse' tags: package-install -- name: include installs/install_on_debian.yml - include_tasks: installs/install_on_debian.yml +- name: Include installs/install_on_debian.yml + ansible.builtin.include_tasks: installs/install_on_debian.yml tags: package-install when: ansible_facts['os_family'] == 'Debian' -- name: include_tasks installs/install_on_clear.yml - include_tasks: installs/install_on_clear.yml +- name: Include_tasks installs/install_on_clear.yml + ansible.builtin.include_tasks: installs/install_on_clear.yml when: ansible_facts['os_family'] == 'ClearLinux' tags: package-install -- name: get ceph version - command: ceph --version +- name: Get ceph version + ansible.builtin.command: ceph --version changed_when: false - check_mode: no + check_mode: false register: ceph_version -- name: set_fact ceph_version - set_fact: +- name: Set_fact ceph_version + ansible.builtin.set_fact: ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}" # override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory -- name: include release-rhcs.yml - include_tasks: release-rhcs.yml +- name: Include release-rhcs.yml + ansible.builtin.include_tasks: release-rhcs.yml when: ceph_repository in ['rhcs', 'dev'] or ceph_origin == 'distro' tags: always -- name: set_fact ceph_release - 
override ceph_release with ceph_stable_release - set_fact: +- name: Set_fact ceph_release - override ceph_release with ceph_stable_release + ansible.builtin.set_fact: ceph_release: "{{ ceph_stable_release }}" when: - ceph_origin == 'repository' - ceph_repository not in ['dev', 'rhcs', 'custom'] tags: always -- name: include create_rbd_client_dir.yml - include_tasks: create_rbd_client_dir.yml +- name: Include create_rbd_client_dir.yml + ansible.builtin.include_tasks: create_rbd_client_dir.yml -- name: include configure_cluster_name.yml - include_tasks: configure_cluster_name.yml +- name: Include configure_cluster_name.yml + ansible.builtin.include_tasks: configure_cluster_name.yml -- name: include configure_memory_allocator.yml - include_tasks: configure_memory_allocator.yml +- name: Include configure_memory_allocator.yml + ansible.builtin.include_tasks: configure_memory_allocator.yml when: - (ceph_tcmalloc_max_total_thread_cache | int) > 0 - (ceph_origin == 'repository' or ceph_origin == 'distro') -- name: include selinux.yml - include_tasks: selinux.yml +- name: Include selinux.yml + ansible.builtin.include_tasks: selinux.yml when: - ansible_facts['os_family'] == 'RedHat' - inventory_hostname in groups.get(nfs_group_name, []) - or inventory_hostname in groups.get(rgwloadbalancer_group_name, []) \ No newline at end of file + or inventory_hostname in groups.get(rgwloadbalancer_group_name, []) diff --git a/roles/ceph-common/tasks/release-rhcs.yml b/roles/ceph-common/tasks/release-rhcs.yml index fbe1d7aa18..34b936192e 100644 --- a/roles/ceph-common/tasks/release-rhcs.yml +++ b/roles/ceph-common/tasks/release-rhcs.yml @@ -1,45 +1,45 @@ --- -- name: set_fact ceph_release jewel - set_fact: +- name: Set_fact ceph_release jewel + ansible.builtin.set_fact: ceph_release: jewel when: ceph_version.split('.')[0] is version('10', '==') -- name: set_fact ceph_release kraken - set_fact: +- name: Set_fact ceph_release kraken + ansible.builtin.set_fact: ceph_release: kraken when: ceph_version.split('.')[0] is version('11', '==') -- name: set_fact ceph_release luminous - set_fact: +- name: Set_fact ceph_release luminous + ansible.builtin.set_fact: ceph_release: luminous when: ceph_version.split('.')[0] is version('12', '==') -- name: set_fact ceph_release mimic - set_fact: +- name: Set_fact ceph_release mimic + ansible.builtin.set_fact: ceph_release: mimic when: ceph_version.split('.')[0] is version('13', '==') -- name: set_fact ceph_release nautilus - set_fact: +- name: Set_fact ceph_release nautilus + ansible.builtin.set_fact: ceph_release: nautilus when: ceph_version.split('.')[0] is version('14', '==') -- name: set_fact ceph_release octopus - set_fact: +- name: Set_fact ceph_release octopus + ansible.builtin.set_fact: ceph_release: octopus when: ceph_version.split('.')[0] is version('15', '==') -- name: set_fact ceph_release pacific - set_fact: +- name: Set_fact ceph_release pacific + ansible.builtin.set_fact: ceph_release: pacific when: ceph_version.split('.')[0] is version('16', '==') -- name: set_fact ceph_release quincy - set_fact: +- name: Set_fact ceph_release quincy + ansible.builtin.set_fact: ceph_release: quincy when: ceph_version.split('.')[0] is version('17', '==') -- name: set_fact ceph_release reef - set_fact: +- name: Set_fact ceph_release reef + ansible.builtin.set_fact: ceph_release: reef when: ceph_version.split('.')[0] is version('18', '==') diff --git a/roles/ceph-common/tasks/selinux.yml b/roles/ceph-common/tasks/selinux.yml index 5166c017cd..65459b58b4 100644 --- 
a/roles/ceph-common/tasks/selinux.yml +++ b/roles/ceph-common/tasks/selinux.yml @@ -1,17 +1,17 @@ --- -- name: if selinux is not disabled +- name: If selinux is not disabled when: ansible_facts['selinux']['status'] == 'enabled' block: - - name: install policycoreutils-python - package: + - name: Install policycoreutils-python + ansible.builtin.package: name: policycoreutils-python state: present register: result until: result is succeeded when: ansible_facts['distribution_major_version'] == '7' - - name: install python3-policycoreutils on RHEL 8 - package: + - name: Install python3-policycoreutils on RHEL 8 + ansible.builtin.package: name: python3-policycoreutils state: present register: result diff --git a/roles/ceph-config/meta/main.yml b/roles/ceph-config/meta/main.yml index 21f0a41bd1..8462226473 100644 --- a/roles/ceph-config/meta/main.yml +++ b/roles/ceph-config/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Guillaume Abrioux description: Handles ceph-ansible initial configuration license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-config/tasks/create_ceph_initial_dirs.yml b/roles/ceph-config/tasks/create_ceph_initial_dirs.yml index a7f1325c85..a131ac2152 100644 --- a/roles/ceph-config/tasks/create_ceph_initial_dirs.yml +++ b/roles/ceph-config/tasks/create_ceph_initial_dirs.yml @@ -1,11 +1,11 @@ --- -- name: create ceph initial directories - file: +- name: Create ceph initial directories + ansible.builtin.file: path: "{{ item }}" state: directory owner: "{{ ceph_uid }}" group: "{{ ceph_uid }}" - mode: 0755 + mode: "0755" loop: - /etc/ceph - /var/lib/ceph/ diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml index 36b46f759d..27d16dede3 100644 --- a/roles/ceph-config/tasks/main.yml +++ b/roles/ceph-config/tasks/main.yml @@ -1,30 +1,33 @@ --- -- name: include create_ceph_initial_dirs.yml - include_tasks: create_ceph_initial_dirs.yml +- name: Include create_ceph_initial_dirs.yml + ansible.builtin.include_tasks: create_ceph_initial_dirs.yml when: containerized_deployment | bool -- name: include_tasks rgw_systemd_environment_file.yml - include_tasks: rgw_systemd_environment_file.yml +- name: Include_tasks rgw_systemd_environment_file.yml + ansible.builtin.include_tasks: rgw_systemd_environment_file.yml when: inventory_hostname in groups.get(rgw_group_name, []) -- name: config file operations related to OSDs +- name: Config file operations related to OSDs when: - inventory_hostname in groups.get(osd_group_name, []) # the rolling_update.yml playbook sets num_osds to the number of currently # running osds - not rolling_update | bool block: - - name: reset num_osds - set_fact: + - name: Reset num_osds + ansible.builtin.set_fact: num_osds: 0 - - name: count number of osds for lvm scenario - set_fact: + - name: Count number of osds for lvm scenario + ansible.builtin.set_fact: num_osds: "{{ num_osds | int + (lvm_volumes | length | int) }}" when: lvm_volumes | default([]) | length > 0 - - block: - - name: look up for ceph-volume rejected devices + - name: Ceph-volume pre-requisites tasks + when: + - devices | default([]) | length > 0 + block: + - name: Look up for ceph-volume rejected devices ceph_volume: cluster: "{{ cluster }}" action: "inventory" @@ -35,17 +38,17 @@ CEPH_CONTAINER_BINARY: "{{ container_binary }}" PYTHONIOENCODING: utf-8 - - name: set_fact rejected_devices - set_fact: + - name: Set_fact rejected_devices + 
ansible.builtin.set_fact: _rejected_devices: "{{ _rejected_devices | default([]) + [item.path] }}" with_items: "{{ rejected_devices.stdout | default('{}') | from_json }}" when: "'Used by ceph-disk' in item.rejected_reasons" - - name: set_fact _devices - set_fact: + - name: Set_fact _devices + ansible.builtin.set_fact: _devices: "{{ devices | difference(_rejected_devices | default([])) }}" - - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created + - name: Run 'ceph-volume lvm batch --report' to see how many osds are to be created ceph_volume: cluster: "{{ cluster }}" objectstore: "{{ osd_objectstore }}" @@ -62,23 +65,21 @@ PYTHONIOENCODING: utf-8 when: _devices | default([]) | length > 0 - - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report) - set_fact: + - name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report) + ansible.builtin.set_fact: num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json).osds | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}" when: - (lvm_batch_report.stdout | default('{}') | from_json) is mapping - (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool - - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report) - set_fact: + - name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report) + ansible.builtin.set_fact: num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json) | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}" when: - (lvm_batch_report.stdout | default('{}') | from_json) is not mapping - (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool - when: - - devices | default([]) | length > 0 - - name: run 'ceph-volume lvm list' to see how many osds have already been created + - name: Run 'ceph-volume lvm list' to see how many osds have already been created ceph_volume: action: "list" register: lvm_list @@ -89,31 +90,31 @@ PYTHONIOENCODING: utf-8 changed_when: false - - name: set_fact num_osds (add existing osds) - set_fact: + - name: Set_fact num_osds (add existing osds) + ansible.builtin.set_fact: num_osds: "{{ num_osds | int + (lvm_list.stdout | default('{}') | from_json | dict2items | map(attribute='value') | flatten | map(attribute='devices') | sum(start=[]) | difference(lvm_volumes | default([]) | map(attribute='data')) | length | int) }}" -- name: set osd related config facts +- name: Set osd related config facts when: inventory_hostname in groups.get(osd_group_name, []) block: - - name: set_fact _osd_memory_target, override from ceph_conf_overrides - set_fact: + - name: Set_fact _osd_memory_target, override from ceph_conf_overrides + ansible.builtin.set_fact: _osd_memory_target: "{{ item }}" loop: - "{{ ceph_conf_overrides.get('osd', {}).get('osd memory target', '') }}" - "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}" when: item - - name: set_fact _osd_memory_target - set_fact: + - name: Set_fact _osd_memory_target + ansible.builtin.set_fact: _osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}" when: - _osd_memory_target is undefined - num_osds | default(0) | int > 0 - ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > (osd_memory_target | float) -- name: create ceph conf 
directory - file: +- name: Create ceph conf directory + ansible.builtin.file: path: "/etc/ceph" state: directory owner: "ceph" @@ -121,13 +122,13 @@ mode: "{{ ceph_directories_mode }}" when: not containerized_deployment | bool -- name: import_role ceph-facts - import_role: +- name: Import_role ceph-facts + ansible.builtin.import_role: name: ceph-facts tasks_from: set_radosgw_address.yml when: inventory_hostname in groups.get(rgw_group_name, []) -- name: "generate {{ cluster }}.conf configuration file" +- name: Generate Ceph file openstack.config_template.config_template: src: "ceph.conf.j2" dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf" @@ -136,10 +137,10 @@ mode: "0644" config_type: ini notify: - - restart ceph mons - - restart ceph osds - - restart ceph mdss - - restart ceph rgws - - restart ceph mgrs - - restart ceph rbdmirrors - - restart ceph rbd-target-api-gw + - Restart ceph mons + - Restart ceph osds + - Restart ceph mdss + - Restart ceph rgws + - Restart ceph mgrs + - Restart ceph rbdmirrors + - Restart ceph rbd-target-api-gw diff --git a/roles/ceph-config/tasks/rgw_systemd_environment_file.yml b/roles/ceph-config/tasks/rgw_systemd_environment_file.yml index 5dac324393..5876738ade 100644 --- a/roles/ceph-config/tasks/rgw_systemd_environment_file.yml +++ b/roles/ceph-config/tasks/rgw_systemd_environment_file.yml @@ -1,6 +1,6 @@ --- -- name: create rados gateway instance directories - file: +- name: Create rados gateway instance directories + ansible.builtin.file: path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -8,8 +8,8 @@ mode: "{{ ceph_directories_mode | default('0755') }}" with_items: "{{ rgw_instances }}" -- name: generate environment file - copy: +- name: Generate environment file + ansible.builtin.copy: dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile" owner: "root" group: "root" @@ -19,4 +19,4 @@ with_items: "{{ rgw_instances }}" when: - containerized_deployment | bool - - rgw_instances is defined \ No newline at end of file + - rgw_instances is defined diff --git a/roles/ceph-container-common/meta/main.yml b/roles/ceph-container-common/meta/main.yml index 8642d3c572..f30dfb9663 100644 --- a/roles/ceph-container-common/meta/main.yml +++ b/roles/ceph-container-common/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Installs Ceph license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-container-common/tasks/fetch_image.yml b/roles/ceph-container-common/tasks/fetch_image.yml index e1b67c7bca..7f504f91dd 100644 --- a/roles/ceph-container-common/tasks/fetch_image.yml +++ b/roles/ceph-container-common/tasks/fetch_image.yml @@ -1,6 +1,6 @@ --- -- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image" - command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" +- name: Pulling Ceph container image + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" changed_when: false register: docker_image until: docker_image.rc == 0 @@ -12,8 +12,8 @@ HTTPS_PROXY: "{{ 
ceph_docker_https_proxy | default('') }}" NO_PROXY: "{{ ceph_docker_no_proxy }}" -- name: "pulling alertmanager/prometheus/grafana container images" - command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}" +- name: Pulling alertmanager/prometheus/grafana container images + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}" changed_when: false register: monitoring_images until: monitoring_images.rc == 0 @@ -31,8 +31,8 @@ HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" NO_PROXY: "{{ ceph_docker_no_proxy }}" -- name: "pulling node-exporter container image" - command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}" +- name: Pulling node-exporter container image + ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}" changed_when: false register: node_exporter_image until: node_exporter_image.rc == 0 @@ -54,27 +54,29 @@ HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" NO_PROXY: "{{ ceph_docker_no_proxy }}" -- name: export local ceph dev image - command: > +- name: Export local ceph dev image + ansible.builtin.command: > {{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}" delegate_to: localhost + changed_when: false when: (ceph_docker_dev_image is defined and ceph_docker_dev_image) run_once: true -- name: copy ceph dev image file - copy: +- name: Copy ceph dev image file + ansible.builtin.copy: src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" + mode: "0644" when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool) -- name: load ceph dev image - command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" +- name: Load ceph dev image + ansible.builtin.command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" + changed_when: false when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool) -- name: remove tmp ceph dev image file - file: +- name: Remove tmp ceph dev image file + ansible.builtin.file: name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" state: absent when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool) - diff --git a/roles/ceph-container-common/tasks/main.yml b/roles/ceph-container-common/tasks/main.yml index 4b663eb300..371e3436f0 100644 --- a/roles/ceph-container-common/tasks/main.yml +++ b/roles/ceph-container-common/tasks/main.yml @@ -1,38 +1,39 @@ --- -- name: generate systemd ceph-mon target file - copy: +- name: Generate systemd ceph-mon target file + ansible.builtin.copy: src: ceph.target dest: /etc/systemd/system/ceph.target + mode: "0644" -- name: enable ceph.target - service: +- name: Enable ceph.target + ansible.builtin.service: name: ceph.target - enabled: yes - daemon_reload: yes + enabled: true + daemon_reload: true -- name: include prerequisites.yml - include_tasks: prerequisites.yml +- name: Include prerequisites.yml + ansible.builtin.include_tasks: prerequisites.yml -- name: include registry.yml - include_tasks: registry.yml +- name: Include 
registry.yml + ansible.builtin.include_tasks: registry.yml when: ceph_docker_registry_auth | bool -- name: include fetch_image.yml - include_tasks: fetch_image.yml +- name: Include fetch_image.yml + ansible.builtin.include_tasks: fetch_image.yml tags: fetch_container_image -- name: get ceph version - command: > +- name: Get ceph version + ansible.builtin.command: > {{ container_binary }} run --rm --net=host --entrypoint /usr/bin/ceph {{ ceph_client_docker_registry }}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }} --version changed_when: false - check_mode: no + check_mode: false register: ceph_version -- name: set_fact ceph_version ceph_version.stdout.split - set_fact: +- name: Set_fact ceph_version ceph_version.stdout.split + ansible.builtin.set_fact: ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}" -- name: include release.yml - include_tasks: release.yml +- name: Include release.yml + ansible.builtin.include_tasks: release.yml diff --git a/roles/ceph-container-common/tasks/prerequisites.yml b/roles/ceph-container-common/tasks/prerequisites.yml index c29d225cb8..1d062a8d91 100644 --- a/roles/ceph-container-common/tasks/prerequisites.yml +++ b/roles/ceph-container-common/tasks/prerequisites.yml @@ -1,52 +1,52 @@ --- -- name: lvmetad tasks related +- name: Lvmetad tasks related when: - inventory_hostname in groups.get(osd_group_name, []) - lvmetad_disabled | default(False) | bool - ansible_facts['os_family'] == 'RedHat' - ansible_facts['distribution_major_version'] | int == 7 block: - - name: stop lvmetad - service: + - name: Stop lvmetad + ansible.builtin.service: name: lvm2-lvmetad state: stopped - - name: disable and mask lvmetad service - service: + - name: Disable and mask lvmetad service + ansible.builtin.systemd: name: lvm2-lvmetad - enabled: no - masked: yes + enabled: false + masked: true -- name: remove ceph udev rules - file: +- name: Remove ceph udev rules + ansible.builtin.file: path: "{{ item }}" state: absent with_items: - /usr/lib/udev/rules.d/95-ceph-osd.rules - /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules -- name: ensure tmpfiles.d is present - lineinfile: +- name: Ensure tmpfiles.d is present + ansible.builtin.lineinfile: path: /etc/tmpfiles.d/ceph-common.conf line: "d /run/ceph 0770 root root -" owner: root group: root - mode: 0644 + mode: "0644" state: present - create: yes + create: true -- name: restore certificates selinux context +- name: Restore certificates selinux context when: - ansible_facts['os_family'] == 'RedHat' - inventory_hostname in groups.get(mon_group_name, []) or inventory_hostname in groups.get(rgw_group_name, []) - command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted + ansible.builtin.command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted changed_when: false -- name: install python3 on osd nodes - package: +- name: Install python3 on osd nodes + ansible.builtin.package: name: python3 state: present when: - inventory_hostname in groups.get(osd_group_name, []) - - ansible_facts['os_family'] == 'RedHat' \ No newline at end of file + - ansible_facts['os_family'] == 'RedHat' diff --git a/roles/ceph-container-common/tasks/registry.yml b/roles/ceph-container-common/tasks/registry.yml index 91a08dcbe7..ef9f2ecd3d 100644 --- a/roles/ceph-container-common/tasks/registry.yml +++ b/roles/ceph-container-common/tasks/registry.yml @@ -1,11 +1,11 @@ --- -- name: container registry authentication - command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry 
}}' +- name: Container registry authentication + ansible.builtin.command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}' args: stdin: '{{ ceph_docker_registry_password }}' - stdin_add_newline: no + stdin_add_newline: false changed_when: false environment: HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}" HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}" - NO_PROXY: "{{ ceph_docker_no_proxy }}" \ No newline at end of file + NO_PROXY: "{{ ceph_docker_no_proxy }}" diff --git a/roles/ceph-container-common/tasks/release.yml b/roles/ceph-container-common/tasks/release.yml index fbe1d7aa18..34b936192e 100644 --- a/roles/ceph-container-common/tasks/release.yml +++ b/roles/ceph-container-common/tasks/release.yml @@ -1,45 +1,45 @@ --- -- name: set_fact ceph_release jewel - set_fact: +- name: Set_fact ceph_release jewel + ansible.builtin.set_fact: ceph_release: jewel when: ceph_version.split('.')[0] is version('10', '==') -- name: set_fact ceph_release kraken - set_fact: +- name: Set_fact ceph_release kraken + ansible.builtin.set_fact: ceph_release: kraken when: ceph_version.split('.')[0] is version('11', '==') -- name: set_fact ceph_release luminous - set_fact: +- name: Set_fact ceph_release luminous + ansible.builtin.set_fact: ceph_release: luminous when: ceph_version.split('.')[0] is version('12', '==') -- name: set_fact ceph_release mimic - set_fact: +- name: Set_fact ceph_release mimic + ansible.builtin.set_fact: ceph_release: mimic when: ceph_version.split('.')[0] is version('13', '==') -- name: set_fact ceph_release nautilus - set_fact: +- name: Set_fact ceph_release nautilus + ansible.builtin.set_fact: ceph_release: nautilus when: ceph_version.split('.')[0] is version('14', '==') -- name: set_fact ceph_release octopus - set_fact: +- name: Set_fact ceph_release octopus + ansible.builtin.set_fact: ceph_release: octopus when: ceph_version.split('.')[0] is version('15', '==') -- name: set_fact ceph_release pacific - set_fact: +- name: Set_fact ceph_release pacific + ansible.builtin.set_fact: ceph_release: pacific when: ceph_version.split('.')[0] is version('16', '==') -- name: set_fact ceph_release quincy - set_fact: +- name: Set_fact ceph_release quincy + ansible.builtin.set_fact: ceph_release: quincy when: ceph_version.split('.')[0] is version('17', '==') -- name: set_fact ceph_release reef - set_fact: +- name: Set_fact ceph_release reef + ansible.builtin.set_fact: ceph_release: reef when: ceph_version.split('.')[0] is version('18', '==') diff --git a/roles/ceph-container-engine/meta/main.yml b/roles/ceph-container-engine/meta/main.yml index f560ac2b21..6de5564808 100644 --- a/roles/ceph-container-engine/meta/main.yml +++ b/roles/ceph-container-engine/meta/main.yml @@ -4,14 +4,14 @@ galaxy_info: author: Guillaume Abrioux description: Handles container installation prerequisites license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: Ubuntu versions: - xenial - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-container-engine/tasks/main.yml b/roles/ceph-container-engine/tasks/main.yml index d328c124f6..70e9a6423c 100644 --- a/roles/ceph-container-engine/tasks/main.yml +++ b/roles/ceph-container-engine/tasks/main.yml @@ -1,4 +1,4 @@ --- -- name: include pre_requisites/prerequisites.yml - include_tasks: pre_requisites/prerequisites.yml +- name: Include pre_requisites/prerequisites.yml + ansible.builtin.include_tasks: 
pre_requisites/prerequisites.yml when: not is_atomic | bool diff --git a/roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml b/roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml index 7a965da985..dfeb2a05d4 100644 --- a/roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml +++ b/roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml @@ -1,31 +1,31 @@ --- -- name: uninstall old docker versions - package: +- name: Uninstall old docker versions + ansible.builtin.package: name: ['docker', 'docker-engine', 'docker.io', 'containerd', 'runc'] state: absent when: container_package_name == 'docker-ce' -- name: allow apt to use a repository over https (debian) - package: +- name: Allow apt to use a repository over https (debian) + ansible.builtin.package: name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common'] - update_cache: yes + update_cache: true register: result until: result is succeeded -- name: add docker's gpg key - apt_key: +- name: Add docker's gpg key + ansible.builtin.apt_key: url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg" register: result until: result is succeeded when: container_package_name == 'docker-ce' -- name: add docker repository - apt_repository: +- name: Add docker repository + ansible.builtin.apt_repository: repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable" when: container_package_name == 'docker-ce' -- name: add podman ppa repository - apt_repository: +- name: Add podman ppa repository + ansible.builtin.apt_repository: repo: "ppa:projectatomic/ppa" when: - container_package_name == 'podman' diff --git a/roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml b/roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml index 283ef96189..2ac0512812 100644 --- a/roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml +++ b/roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml @@ -1,54 +1,55 @@ --- -- name: include specific variables - include_vars: "{{ item }}" +- name: Include specific variables + ansible.builtin.include_vars: "{{ item }}" with_first_found: - "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml" - "{{ ansible_facts['os_family'] }}.yml" when: container_package_name is undefined and container_service_name is undefined -- name: debian based systems tasks - include_tasks: debian_prerequisites.yml +- name: Debian based systems tasks + ansible.builtin.include_tasks: debian_prerequisites.yml when: - ansible_facts['os_family'] == 'Debian' tags: with_pkg -- name: install container packages - package: +- name: Install container packages + ansible.builtin.package: name: '{{ container_package_name }}' update_cache: true register: result until: result is succeeded tags: with_pkg -- name: install lvm2 package - package: +- name: Install lvm2 package + ansible.builtin.package: name: lvm2 register: result until: result is succeeded tags: with_pkg when: inventory_hostname in groups.get(osd_group_name, []) -- name: extra configuration for docker +- name: Extra configuration for docker when: container_service_name == 'docker' block: - - name: create the systemd docker override directory - file: + - name: Create the systemd docker override directory + ansible.builtin.file: path: /etc/systemd/system/docker.service.d state: directory + mode: "0755" when: 
ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined - - name: create the systemd docker override file - template: + - name: Create the systemd docker override file + ansible.builtin.template: src: docker-proxy.conf.j2 dest: /etc/systemd/system/docker.service.d/proxy.conf - mode: 0600 + mode: "0600" owner: root group: root register: proxy_created when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined - - name: remove docker proxy configuration - file: + - name: Remove docker proxy configuration + ansible.builtin.file: path: /etc/systemd/system/docker.service.d/proxy.conf state: absent register: proxy_removed @@ -60,17 +61,17 @@ # have an immediate effect and not wait the end of the play. # using flush_handlers via the meta action plugin isn't enough too because # it flushes all handlers and not only the one notified in this role. - - name: restart docker - systemd: + - name: Restart docker + ansible.builtin.systemd: name: "{{ container_service_name }}" state: restarted - daemon_reload: yes + daemon_reload: true when: proxy_created.changed | bool or proxy_removed.changed | bool - - name: start container service - service: + - name: Start container service + ansible.builtin.service: name: '{{ container_service_name }}' state: started - enabled: yes + enabled: true tags: with_pkg diff --git a/roles/ceph-crash/meta/main.yml b/roles/ceph-crash/meta/main.yml index 43578ce14d..63bc64d10c 100644 --- a/roles/ceph-crash/meta/main.yml +++ b/roles/ceph-crash/meta/main.yml @@ -4,12 +4,11 @@ galaxy_info: author: Guillaume Abrioux description: Deploy ceph-crash license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 - - 8 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-crash/tasks/main.yml b/roles/ceph-crash/tasks/main.yml index 51c4aeb58f..82a1921d20 100644 --- a/roles/ceph-crash/tasks/main.yml +++ b/roles/ceph-crash/tasks/main.yml @@ -1,8 +1,8 @@ --- -- name: create and copy client.crash keyring +- name: Create and copy client.crash keyring when: cephx | bool block: - - name: create client.crash keyring + - name: Create client.crash keyring ceph_key: name: "client.crash" caps: @@ -10,7 +10,7 @@ mgr: 'allow profile crash' cluster: "{{ cluster }}" dest: "{{ ceph_conf_key_directory }}" - import_key: True + import_key: true mode: "{{ ceph_keyring_permissions }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -18,10 +18,10 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" - run_once: True + run_once: true no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: get keys from monitors + - name: Get keys from monitors ceph_key: name: client.crash cluster: "{{ cluster }}" @@ -35,8 +35,8 @@ run_once: true no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: copy ceph key(s) if needed - copy: + - name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.client.crash.keyring" content: "{{ _crash_keys.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -44,24 +44,24 @@ mode: "{{ ceph_keyring_permissions }}" no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: start ceph-crash daemon +- name: Start ceph-crash daemon 
when: containerized_deployment | bool block: - - name: create /var/lib/ceph/crash/posted - file: + - name: Create /var/lib/ceph/crash/posted + ansible.builtin.file: path: /var/lib/ceph/crash/posted state: directory mode: '0755' owner: "{{ ceph_uid }}" group: "{{ ceph_uid }}" - - name: include_tasks systemd.yml - include_tasks: systemd.yml + - name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml -- name: start the ceph-crash service - systemd: +- name: Start the ceph-crash service + ansible.builtin.systemd: name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}" state: started - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true diff --git a/roles/ceph-crash/tasks/systemd.yml b/roles/ceph-crash/tasks/systemd.yml index 3b2ded8075..a8c07b1143 100644 --- a/roles/ceph-crash/tasks/systemd.yml +++ b/roles/ceph-crash/tasks/systemd.yml @@ -1,9 +1,9 @@ --- -- name: generate systemd unit file for ceph-crash container - template: +- name: Generate systemd unit file for ceph-crash container + ansible.builtin.template: src: "{{ role_path }}/templates/ceph-crash.service.j2" dest: /etc/systemd/system/ceph-crash@.service owner: "root" group: "root" mode: "0644" - notify: restart ceph crash \ No newline at end of file + notify: Restart ceph crash diff --git a/roles/ceph-dashboard/meta/main.yml b/roles/ceph-dashboard/meta/main.yml index 464f131bca..8e99090a71 100644 --- a/roles/ceph-dashboard/meta/main.yml +++ b/roles/ceph-dashboard/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Boris Ranto description: Configures Ceph Dashboard license: Apache - min_ansible_version: 2.4 + min_ansible_version: '2.4' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-dashboard/tasks/configure_dashboard.yml b/roles/ceph-dashboard/tasks/configure_dashboard.yml index ad10b013f1..a3fec0c0df 100644 --- a/roles/ceph-dashboard/tasks/configure_dashboard.yml +++ b/roles/ceph-dashboard/tasks/configure_dashboard.yml @@ -1,36 +1,38 @@ --- -- import_role: +- name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: container_binary.yml delegate_to: "{{ groups[mon_group_name][0] }}" delegate_facts: true -- name: set_fact container_exec_cmd - set_fact: +- name: Set_fact container_exec_cmd + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" when: containerized_deployment | bool -- name: set_fact container_run_cmd - set_fact: +- name: Set_fact container_run_cmd + ansible.builtin.set_fact: ceph_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] + ' run --interactive --net=host --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}" -- name: get current mgr backend - ipv4 - set_fact: +- name: Get current mgr backend - ipv4 + ansible.builtin.set_fact: dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(dashboard_network.split(',')) | first }}" when: ip_version == 'ipv4' loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}" delegate_to: "{{ item }}" - delegate_facts: True + delegate_facts: true -- name: get current mgr backend - ipv6 - set_fact: +- name: Get current mgr 
backend - ipv6 + ansible.builtin.set_fact: dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(dashboard_network.split(',')) | last }}" when: ip_version == 'ipv6' loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}" delegate_to: "{{ item }}" - delegate_facts: True + delegate_facts: true -- include_role: +- name: Include ceph-facts role + ansible.builtin.include_role: name: ceph-facts tasks_from: set_radosgw_address.yml loop: "{{ groups.get(rgw_group_name, []) }}" @@ -39,100 +41,103 @@ loop_var: ceph_dashboard_call_item when: inventory_hostname in groups.get(rgw_group_name, []) -- name: disable SSL for dashboard +- name: Disable SSL for dashboard when: dashboard_protocol == "http" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true block: - - name: get SSL status for dashboard - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl" - changed_when: false - register: current_ssl_for_dashboard + - name: Get SSL status for dashboard + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl" + changed_when: false + register: current_ssl_for_dashboard - - name: disable SSL for dashboard - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false" - when: current_ssl_for_dashboard.stdout == "true" + - name: Disable SSL for dashboard + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false" + changed_when: false + when: current_ssl_for_dashboard.stdout == "true" -- name: with SSL for dashboard +- name: With SSL for dashboard when: dashboard_protocol == "https" block: - - name: enable SSL for dashboard - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true" + - name: Enable SSL for dashboard + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true + changed_when: false - - name: copy dashboard SSL certificate file - copy: + - name: Copy dashboard SSL certificate file + ansible.builtin.copy: src: "{{ dashboard_crt }}" dest: "/etc/ceph/ceph-dashboard.crt" owner: root group: root - mode: 0440 + mode: "0440" remote_src: "{{ dashboard_tls_external | bool }}" delegate_to: "{{ groups[mon_group_name][0] }}" when: dashboard_crt | length > 0 - - name: copy dashboard SSL certificate key - copy: + - name: Copy dashboard SSL certificate key + ansible.builtin.copy: src: "{{ dashboard_key }}" dest: "/etc/ceph/ceph-dashboard.key" owner: root group: root - mode: 0440 + mode: "0440" remote_src: "{{ dashboard_tls_external | bool }}" delegate_to: "{{ groups[mon_group_name][0] }}" when: dashboard_key | length > 0 - - name: generate and copy self-signed certificate + - name: Generate and copy self-signed certificate when: dashboard_key | length == 0 or dashboard_crt | length == 0 run_once: true block: - - name: set_fact subj_alt_names - set_fact: + - name: Set_fact subj_alt_names + ansible.builtin.set_fact: subj_alt_names: > - {% for host in groups[mgr_group_name] | default(groups[mon_group_name]) -%} - DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %} - {%- endfor -%} + {% for host in 
groups[mgr_group_name] | default(groups[mon_group_name]) -%} DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %}{%- endfor -%} - - name: create tempfile for openssl certificate and key generation - tempfile: + - name: Create tempfile for openssl certificate and key generation + ansible.builtin.tempfile: state: file register: openssl_config_file - - name: copy the openssl configuration file - copy: + - name: Copy the openssl configuration file + ansible.builtin.copy: src: "{{ '/etc/pki/tls/openssl.cnf' if ansible_facts['os_family'] == 'RedHat' else '/etc/ssl/openssl.cnf' }}" dest: '{{ openssl_config_file.path }}' remote_src: true + mode: "0644" - - name: add subjectAltName to the openssl configuration - ini_file: + - name: Add subjectAltName to the openssl configuration + community.general.ini_file: path: '{{ openssl_config_file.path }}' section: v3_ca option: subjectAltName value: '{{ subj_alt_names | trim }}' + mode: "0644" - - name: generate a Self Signed OpenSSL certificate for dashboard - shell: | + - name: Generate a Self Signed OpenSSL certificate for dashboard + ansible.builtin.shell: | test -f /etc/ceph/ceph-dashboard.key -a -f /etc/ceph/ceph-dashboard.crt || \ openssl req -new -nodes -x509 -subj '/O=IT/CN={{ dashboard_certificate_cn }}/' -config {{ openssl_config_file.path }} -days 3650 -keyout /etc/ceph/ceph-dashboard.key -out /etc/ceph/ceph-dashboard.crt -extensions v3_ca + changed_when: false - - name: remove the openssl tempfile - file: + - name: Remove the openssl tempfile + ansible.builtin.file: path: '{{ openssl_config_file.path }}' state: absent - - name: slurp self-signed generated certificate for dashboard - slurp: + - name: Slurp self-signed generated certificate for dashboard + ansible.builtin.slurp: src: "/etc/ceph/{{ item }}" - run_once: True + run_once: true with_items: - 'ceph-dashboard.key' - 'ceph-dashboard.crt' register: slurp_self_signed_crt - - name: copy self-signed generated certificate on mons - copy: + - name: Copy self-signed generated certificate on mons + ansible.builtin.copy: dest: "{{ item.0.source }}" content: "{{ item.0.content | b64decode }}" owner: "{{ ceph_uid }}" @@ -143,39 +148,39 @@ - "{{ slurp_self_signed_crt.results }}" - "{{ groups[mon_group_name] }}" - - name: import dashboard certificate file - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt" + - name: Import dashboard certificate file + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true - - name: import dashboard certificate key - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key" + - name: Import dashboard certificate key + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true -- name: "set the dashboard port ({{ dashboard_port }})" - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}" +- name: Set the dashboard port + ansible.builtin.command: "{{ container_exec_cmd }} ceph 
--cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true -- name: "set the dashboard SSL port ({{ dashboard_port }})" - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}" +- name: Set the dashboard SSL port + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true changed_when: false failed_when: false # Do not fail if the option does not exist, it only exists post-14.2.0 -- name: config the current dashboard backend - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}" +- name: Config the current dashboard backend + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}" delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false run_once: true with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}' -- name: disable mgr dashboard module (restart) +- name: Disable mgr dashboard module (restart) ceph_mgr_module: name: dashboard cluster: "{{ cluster }}" @@ -186,7 +191,7 @@ delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true -- name: enable mgr dashboard module (restart) +- name: Enable mgr dashboard module (restart) ceph_mgr_module: name: dashboard cluster: "{{ cluster }}" @@ -197,7 +202,7 @@ delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true -- name: create dashboard admin user +- name: Create dashboard admin user ceph_dashboard_user: name: "{{ dashboard_admin_user }}" cluster: "{{ cluster }}" @@ -209,30 +214,30 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" -- name: disable unused dashboard features - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}" +- name: Disable unused dashboard features + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true changed_when: false with_items: "{{ dashboard_disabled_features }}" -- name: set grafana api user - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}" +- name: Set grafana api user + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true changed_when: false -- name: set grafana api password - command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -" +- name: Set grafana api password + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -" args: stdin: "{{ grafana_admin_password }}" - stdin_add_newline: no + stdin_add_newline: false delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true changed_when: false -- name: 
disable ssl verification for grafana - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False" +- name: Disable ssl verification for grafana + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true changed_when: false @@ -240,101 +245,102 @@ - dashboard_protocol == "https" - dashboard_grafana_api_no_ssl_verify | bool -- name: set alertmanager host - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}" +- name: Set alertmanager host + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true changed_when: false -- name: set prometheus host - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}" +- name: Set prometheus host + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true changed_when: false -- include_tasks: configure_grafana_layouts.yml +- name: Include grafana layout tasks + ansible.builtin.include_tasks: configure_grafana_layouts.yml with_items: '{{ grafana_server_addrs }}' vars: grafana_server_addr: '{{ item }}' -- name: config monitoring api url vip +- name: Config monitoring api url vip run_once: true block: - - name: config grafana api url vip - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ grafana_port }}" + - name: Config grafana api url vip + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ grafana_port }}" delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false when: dashboard_frontend_vip is defined and dashboard_frontend_vip | length > 0 - - name: config alertmanager api url - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}" + - name: Config alertmanager api url + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}" delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false when: alertmanager_frontend_vip is defined and alertmanager_frontend_vip | length > 0 - - name: config prometheus api url - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}" + - name: Config prometheus api url + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}" delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false when: 
prometheus_frontend_vip is defined and prometheus_frontend_vip | length > 0 -- name: dashboard object gateway management frontend +- name: Dashboard object gateway management frontend when: groups.get(rgw_group_name, []) | length > 0 run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" block: - - name: set the rgw credentials - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials" + - name: Set the rgw credentials + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials" changed_when: false register: result until: result is succeeded retries: 5 - - name: set the rgw admin resource - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}" + - name: Set the rgw admin resource + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}" changed_when: false when: dashboard_rgw_api_admin_resource | length > 0 - - name: disable ssl verification for rgw - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False" + - name: Disable ssl verification for rgw + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False" changed_when: false when: - dashboard_rgw_api_no_ssl_verify | bool - radosgw_frontend_ssl_certificate | length > 0 -- name: dashboard iscsi management +- name: Dashboard iscsi management when: groups.get(iscsi_gw_group_name, []) | length > 0 run_once: true block: - - name: disable iscsi api ssl verification - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false" + - name: Disable iscsi api ssl verification + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" when: - api_secure | default(false) | bool - generate_crt | default(false) | bool - - name: add iscsi gateways - ipv4 - command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -" + - name: Add iscsi gateways - ipv4 + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -" args: stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(igw_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}" - stdin_add_newline: no + stdin_add_newline: false changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" with_items: "{{ groups[iscsi_gw_group_name] }}" when: ip_version == 'ipv4' - - name: add iscsi gateways - ipv6 - command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -" + - name: Add iscsi gateways - ipv6 + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -" args: stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(igw_network.split(',')) | last | 
ansible.utils.ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}" - stdin_add_newline: no + stdin_add_newline: false changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" with_items: "{{ groups[iscsi_gw_group_name] }}" when: ip_version == 'ipv6' -- name: disable mgr dashboard module (restart) +- name: Disable mgr dashboard module (restart) ceph_mgr_module: name: dashboard cluster: "{{ cluster }}" @@ -345,7 +351,7 @@ delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true -- name: enable mgr dashboard module (restart) +- name: Enable mgr dashboard module (restart) ceph_mgr_module: name: dashboard cluster: "{{ cluster }}" diff --git a/roles/ceph-dashboard/tasks/configure_grafana_layouts.yml b/roles/ceph-dashboard/tasks/configure_grafana_layouts.yml index 556c5870c0..2b386acf7c 100644 --- a/roles/ceph-dashboard/tasks/configure_grafana_layouts.yml +++ b/roles/ceph-dashboard/tasks/configure_grafana_layouts.yml @@ -1,12 +1,12 @@ --- -- name: set grafana url - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}" +- name: Set grafana url + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true changed_when: false -- name: inject grafana dashboard layouts - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update" +- name: Inject grafana dashboard layouts + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update" delegate_to: "{{ groups[mon_group_name][0] }}" run_once: true changed_when: false diff --git a/roles/ceph-dashboard/tasks/main.yml b/roles/ceph-dashboard/tasks/main.yml index 723b316496..581cf96c12 100644 --- a/roles/ceph-dashboard/tasks/main.yml +++ b/roles/ceph-dashboard/tasks/main.yml @@ -1,8 +1,8 @@ --- -- name: include configure_dashboard.yml - include_tasks: configure_dashboard.yml +- name: Include configure_dashboard.yml + ansible.builtin.include_tasks: configure_dashboard.yml -- name: print dashboard URL - debug: +- name: Print dashboard URL + ansible.builtin.debug: msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password." run_once: true diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index e99adf9648..7d6be078c7 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -66,7 +66,7 @@ adopt_label_group_names: # If configure_firewall is true, then ansible will try to configure the # appropriate firewalling rules so that Ceph daemons can communicate # with each others. -configure_firewall: True +configure_firewall: true # Open ports on corresponding nodes if firewall is installed on it ceph_mon_firewall_zone: public @@ -112,7 +112,7 @@ ntp_daemon_type: chronyd # This variable determines if ceph packages can be updated. If False, the # package resources will use "state=present". If True, they will use # "state=latest". 
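Side note (not part of the patch): the comment above describes the contract of upgrade_ceph_packages, and the change below only rewrites the default as a plain YAML boolean to satisfy the linter's truthy rule. As a rough sketch of how such a flag is typically consumed (the actual install tasks are not shown in this hunk, and the package name here is a placeholder):

  - name: Install ceph packages   # illustrative task, not taken from this patch
    ansible.builtin.package:
      name: ceph-common           # placeholder package name
      state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
    register: result
    until: result is succeeded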
-upgrade_ceph_packages: False +upgrade_ceph_packages: false ceph_use_distro_backports: false # DEBIAN ONLY ceph_directories_mode: "0755" @@ -163,7 +163,7 @@ libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubun # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ # for more info read: https://github.com/ceph/ceph-ansible/issues/305 -#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}" +# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}" # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0) @@ -221,7 +221,7 @@ ceph_iscsi_config_dev: true # special repo for deploying iSCSI gateways # a URL to the .repo file to be installed on the targets. For deb, # ceph_custom_repo should be the URL to the repo base. # -#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc +# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc ceph_custom_repo: https://server.domain.com/ceph-custom-repo @@ -230,14 +230,14 @@ ceph_custom_repo: https://server.domain.com/ceph-custom-repo # Enabled when ceph_repository == 'local' # # Path to DESTDIR of the ceph install -#ceph_installation_dir: "/path/to/ceph_installation/" +# ceph_installation_dir: "/path/to/ceph_installation/" # Whether or not to use installer script rundep_installer.sh # This script takes in rundep and installs the packages line by line onto the machine # If this is set to false then it is assumed that the machine ceph is being copied onto will already have # all runtime dependencies installed -#use_installer: false +# use_installer: false # Root directory for ceph-ansible -#ansible_dir: "/path/to/ceph-ansible" +# ansible_dir: "/path/to/ceph-ansible" ###################### @@ -320,12 +320,12 @@ monitor_address_block: subnet ip_version: ipv4 mon_host_v1: - enabled: True + enabled: true suffix: ':6789' mon_host_v2: suffix: ':3300' -enable_ceph_volume_debug: False +enable_ceph_volume_debug: false ########## # CEPHFS # @@ -397,7 +397,7 @@ email_address: foo@bar.com ## Testing mode # enable this mode _only_ when you have a single node # if you don't want it keep the option commented -#common_single_host_mode: true +# common_single_host_mode: true ## Handlers - restarting daemons after a config change # if for whatever reasons the content of your ceph configuration changes @@ -519,16 +519,16 @@ ceph_docker_image: "ceph/daemon-base" ceph_docker_image_tag: latest-main ceph_docker_registry: quay.io ceph_docker_registry_auth: false -#ceph_docker_registry_username: -#ceph_docker_registry_password: -#ceph_docker_http_proxy: -#ceph_docker_https_proxy: +# ceph_docker_registry_username: +# ceph_docker_registry_password: +# ceph_docker_http_proxy: +# ceph_docker_https_proxy: ceph_docker_no_proxy: "localhost,127.0.0.1" ## Client only docker image - defaults to {{ ceph_docker_image }} ceph_client_docker_image: "{{ ceph_docker_image }}" ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}" ceph_client_docker_registry: "{{ ceph_docker_registry }}" -containerized_deployment: False +containerized_deployment: false container_binary: timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}" @@ -555,7 +555,7 @@ openstack_config: false # name: "images" # rule_name: 
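For illustration (not part of the patch): once uncommented, for example in a user's group_vars, keeping 30 days of Prometheus metrics as the comment above suggests would look like:

  # retain 30 days of Prometheus metrics (illustrative override)
  prometheus_storage_tsdb_retention_time: '30d'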
"my_replicated_rule" # application: "rbd" -# pg_autoscale_mode: False +# pg_autoscale_mode: false # pg_num: 16 # pgp_num: 16 # target_size_ratio: 0.2 @@ -605,7 +605,7 @@ openstack_keys: ############# # DASHBOARD # ############# -dashboard_enabled: True +dashboard_enabled: true # Choose http or https # For https, you should set dashboard.crt/key and grafana.crt/key # If you define the dashboard_crt and dashboard_key variables, but leave them as '', @@ -617,7 +617,7 @@ dashboard_network: "{{ public_network }}" dashboard_admin_user: admin dashboard_admin_user_ro: false # This variable must be set with a strong custom password when dashboard_enabled is True -#dashboard_admin_password: p@ssw0rd +# dashboard_admin_password: p@ssw0rd # We only need this for SSL (https) connections dashboard_crt: '' dashboard_key: '' @@ -626,7 +626,7 @@ dashboard_tls_external: false dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}" dashboard_rgw_api_user_id: ceph-dashboard dashboard_rgw_api_admin_resource: '' -dashboard_rgw_api_no_ssl_verify: False +dashboard_rgw_api_no_ssl_verify: false dashboard_frontend_vip: '' dashboard_disabled_features: [] prometheus_frontend_vip: '' @@ -635,7 +635,7 @@ node_exporter_container_image: "docker.io/prom/node-exporter:v0.17.0" node_exporter_port: 9100 grafana_admin_user: admin # This variable must be set with a strong custom password when dashboard_enabled is True -#grafana_admin_password: admin +# grafana_admin_password: admin # We only need this for SSL (https) connections grafana_crt: '' grafana_key: '' @@ -667,7 +667,7 @@ grafana_dashboard_files: grafana_plugins: - vonage-status-panel - grafana-piechart-panel -grafana_allow_embedding: True +grafana_allow_embedding: true grafana_port: 3000 grafana_network: "{{ public_network }}" grafana_conf_overrides: {} @@ -683,7 +683,7 @@ prometheus_port: 9092 prometheus_conf_overrides: {} # Uncomment out this variable if you need to customize the retention period for prometheus storage. # set it to '30d' if you want to retain 30 days of data. 
-#prometheus_storage_tsdb_retention_time: 15d +# prometheus_storage_tsdb_retention_time: 15d alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2" alertmanager_container_cpu_period: 100000 alertmanager_container_cpu_cores: 2 @@ -741,11 +741,11 @@ gateway_ip_list: 0.0.0.0 # # Example: # -#rbd_devices: -# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' } -# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' } -# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' } -# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' } +# rbd_devices: +# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' } +# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' } +# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' } +# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' } rbd_devices: {} # client_connections defines the client ACL's to restrict client access to specific LUNs @@ -759,20 +759,19 @@ rbd_devices: {} # # Example: # -#client_connections: -# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' } -# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' } +# client_connections: +# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' } +# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' } client_connections: {} -no_log_on_ceph_key_tasks: True +no_log_on_ceph_key_tasks: true ############### # DEPRECATION # ############### - ###################################################### # VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER # # *DO NOT* MODIFY THEM # @@ -780,4 +779,4 @@ no_log_on_ceph_key_tasks: True container_exec_cmd: docker: false -ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}" +ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}" diff --git a/roles/ceph-defaults/meta/main.yml b/roles/ceph-defaults/meta/main.yml index d8aa0769ec..ec8d1964b8 100644 --- a/roles/ceph-defaults/meta/main.yml +++ b/roles/ceph-defaults/meta/main.yml @@ -4,14 +4,14 @@ galaxy_info: author: Sébastien Han description: Handles ceph-ansible default vars for all roles license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: Ubuntu versions: - xenial - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-defaults/tasks/main.yml b/roles/ceph-defaults/tasks/main.yml index 73b314ff7c..ed97d539c0 100644 --- a/roles/ceph-defaults/tasks/main.yml +++ b/roles/ceph-defaults/tasks/main.yml @@ -1 +1 @@ ---- \ No newline at end of file +--- diff --git a/roles/ceph-defaults/vars/main.yml b/roles/ceph-defaults/vars/main.yml index c53d20846f..3716c4d083 100644 --- a/roles/ceph-defaults/vars/main.yml +++ b/roles/ceph-defaults/vars/main.yml @@ -1,3 +1,3 @@ --- ceph_osd_pool_default_crush_rule: -1 -ceph_osd_pool_default_crush_rule_name: "replicated_rule" \ No newline at end of file +ceph_osd_pool_default_crush_rule_name: "replicated_rule" diff --git a/roles/ceph-facts/meta/main.yml b/roles/ceph-facts/meta/main.yml index 
b834c53089..78818ae247 100644 --- a/roles/ceph-facts/meta/main.yml +++ b/roles/ceph-facts/meta/main.yml @@ -4,14 +4,14 @@ galaxy_info: author: Guillaume Abrioux description: Set some facts for ceph to be deployed license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: Ubuntu versions: - xenial - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-facts/tasks/container_binary.yml b/roles/ceph-facts/tasks/container_binary.yml index e41c24017a..2f1355c591 100644 --- a/roles/ceph-facts/tasks/container_binary.yml +++ b/roles/ceph-facts/tasks/container_binary.yml @@ -1,10 +1,10 @@ --- -- name: check if podman binary is present - stat: +- name: Check if podman binary is present + ansible.builtin.stat: path: /usr/bin/podman register: podman_binary -- name: set_fact container_binary - set_fact: +- name: Set_fact container_binary + ansible.builtin.set_fact: container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] in ['8', '9']) else 'docker' }}" when: not docker2podman | default(false) | bool diff --git a/roles/ceph-facts/tasks/convert_grafana_server_group_name.yml b/roles/ceph-facts/tasks/convert_grafana_server_group_name.yml index 6e8912e042..5d26298f7a 100644 --- a/roles/ceph-facts/tasks/convert_grafana_server_group_name.yml +++ b/roles/ceph-facts/tasks/convert_grafana_server_group_name.yml @@ -1,9 +1,9 @@ --- -- name: convert grafana-server group name if exist - add_host: +- name: Convert grafana-server group name if exist + ansible.builtin.add_host: name: "{{ item }}" groups: "{{ monitoring_group_name }}" ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}" ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}" - with_items: "{{ groups.get((grafana_server_group_name|default('grafana-server')), []) }}" - run_once: True + with_items: "{{ groups.get((grafana_server_group_name | default('grafana-server')), []) }}" + run_once: true diff --git a/roles/ceph-facts/tasks/devices.yml b/roles/ceph-facts/tasks/devices.yml index a66fafd9f0..ea20c252d5 100644 --- a/roles/ceph-facts/tasks/devices.yml +++ b/roles/ceph-facts/tasks/devices.yml @@ -1,56 +1,56 @@ --- -- name: resolve devices +- name: Resolve devices when: - devices is defined - not osd_auto_discovery | default(False) | bool block: - - name: resolve device link(s) - command: readlink -f {{ item }} + - name: Resolve device link(s) + ansible.builtin.command: readlink -f {{ item }} changed_when: false - check_mode: no + check_mode: false with_items: "{{ devices }}" register: devices_prepare_canonicalize - - name: set_fact build devices from resolved symlinks - set_fact: - devices: "{{ devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search','/dev/disk') | list | unique }}" + - name: Set_fact build devices from resolved symlinks + ansible.builtin.set_fact: + devices: "{{ devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search', '/dev/disk') | list | unique }}" -- name: resolve dedicated_device +- name: Resolve dedicated_device when: - dedicated_devices is defined - not osd_auto_discovery | default(False) | bool block: - - name: resolve dedicated_device link(s) - command: readlink -f {{ item }} + - name: Resolve dedicated_device link(s) + ansible.builtin.command: readlink -f {{ item }} changed_when: false - check_mode: no + check_mode: false with_items: "{{ 
dedicated_devices }}" register: dedicated_devices_prepare_canonicalize - - name: set_fact build dedicated_devices from resolved symlinks - set_fact: - dedicated_devices: "{{ dedicated_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search','/dev/disk') | list | unique }}" + - name: Set_fact build dedicated_devices from resolved symlinks + ansible.builtin.set_fact: + dedicated_devices: "{{ dedicated_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search', '/dev/disk') | list | unique }}" -- name: resolve bluestore_wal_device +- name: Resolve bluestore_wal_device when: - bluestore_wal_devices is defined - not osd_auto_discovery | default(False) | bool block: - - name: resolve bluestore_wal_device link(s) - command: readlink -f {{ item }} + - name: Resolve bluestore_wal_device link(s) + ansible.builtin.command: readlink -f {{ item }} changed_when: false - check_mode: no + check_mode: false with_items: "{{ bluestore_wal_devices }}" register: bluestore_wal_devices_prepare_canonicalize - - name: set_fact build bluestore_wal_devices from resolved symlinks - set_fact: - bluestore_wal_devices: "{{ bluestore_wal_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search','/dev/disk') | list | unique }}" + - name: Set_fact build bluestore_wal_devices from resolved symlinks + ansible.builtin.set_fact: + bluestore_wal_devices: "{{ bluestore_wal_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search', '/dev/disk') | list | unique }}" -- name: set_fact devices generate device list when osd_auto_discovery +- name: Set_fact devices generate device list when osd_auto_discovery vars: device: "{{ item.key | regex_replace('^', '/dev/') }}" - set_fact: + ansible.builtin.set_fact: devices: "{{ devices | default([]) | union([device]) }}" with_dict: "{{ ansible_facts['devices'] }}" when: diff --git a/roles/ceph-facts/tasks/facts.yml b/roles/ceph-facts/tasks/facts.yml index 36124378cf..26f2f97875 100644 --- a/roles/ceph-facts/tasks/facts.yml +++ b/roles/ceph-facts/tasks/facts.yml @@ -1,45 +1,45 @@ --- -- name: check if it is atomic host - stat: +- name: Check if it is atomic host + ansible.builtin.stat: path: /run/ostree-booted register: stat_ostree -- name: set_fact is_atomic - set_fact: +- name: Set_fact is_atomic + ansible.builtin.set_fact: is_atomic: "{{ stat_ostree.stat.exists }}" -- name: import_tasks container_binary.yml - import_tasks: container_binary.yml +- name: Import_tasks container_binary.yml + ansible.builtin.import_tasks: container_binary.yml -- name: set_fact ceph_cmd - set_fact: +- name: Set_fact ceph_cmd + ansible.builtin.set_fact: ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}" # In case ansible_python_interpreter is set by the user, # ansible will not discover python and discovered_interpreter_python # will not be set -- name: set_fact discovered_interpreter_python - set_fact: +- name: Set_fact discovered_interpreter_python + ansible.builtin.set_fact: discovered_interpreter_python: "{{ ansible_python_interpreter }}" when: ansible_python_interpreter is defined # If ansible_python_interpreter is not defined, this can result in the # discovered_interpreter_python fact from being set. This fails later in this # playbook and is used elsewhere. 
-- name: set_fact discovered_interpreter_python if not previously set - set_fact: +- name: Set_fact discovered_interpreter_python if not previously set + ansible.builtin.set_fact: discovered_interpreter_python: "{{ ansible_facts['discovered_interpreter_python'] }}" when: - discovered_interpreter_python is not defined - ansible_facts['discovered_interpreter_python'] is defined # Set ceph_release to ceph_stable by default -- name: set_fact ceph_release ceph_stable_release - set_fact: +- name: Set_fact ceph_release ceph_stable_release + ansible.builtin.set_fact: ceph_release: "{{ ceph_stable_release }}" -- name: set_fact monitor_name ansible_facts['hostname'] - set_fact: +- name: Set_fact monitor_name ansible_facts['hostname'] + ansible.builtin.set_fact: monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}" delegate_to: "{{ item }}" delegate_facts: true @@ -47,30 +47,31 @@ run_once: true when: groups.get(mon_group_name, []) | length > 0 -- name: find a running monitor +- name: Find a running monitor when: groups.get(mon_group_name, []) | length > 0 block: - - name: set_fact container_exec_cmd - set_fact: + - name: Set_fact container_exec_cmd + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}" when: - containerized_deployment | bool - - name: find a running mon container - command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" + - name: Find a running mon container + ansible.builtin.command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" register: find_running_mon_container failed_when: false run_once: true delegate_to: "{{ item }}" with_items: "{{ groups.get(mon_group_name, []) }}" + changed_when: false when: - containerized_deployment | bool - - name: check for a ceph mon socket - shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok + - name: Check for a ceph mon socket + ansible.builtin.shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok changed_when: false failed_when: false - check_mode: no + check_mode: false register: mon_socket_stat run_once: true delegate_to: "{{ item }}" @@ -78,11 +79,11 @@ when: - not containerized_deployment | bool - - name: check if the ceph mon socket is in-use - command: grep -q {{ item.stdout }} /proc/net/unix + - name: Check if the ceph mon socket is in-use + ansible.builtin.command: grep -q {{ item.stdout }} /proc/net/unix changed_when: false failed_when: false - check_mode: no + check_mode: false register: mon_socket run_once: true delegate_to: "{{ hostvars[item.item]['inventory_hostname'] }}" @@ -91,8 +92,8 @@ - not containerized_deployment | bool - item.rc == 0 - - name: set_fact running_mon - non_container - set_fact: + - name: Set_fact running_mon - non_container + ansible.builtin.set_fact: running_mon: "{{ hostvars[item.item.item]['inventory_hostname'] }}" with_items: "{{ mon_socket.results }}" run_once: true @@ -101,8 +102,8 @@ - item.rc is defined - item.rc == 0 - - name: set_fact running_mon - container - set_fact: + - name: Set_fact running_mon - container + ansible.builtin.set_fact: running_mon: "{{ item.item }}" run_once: true with_items: "{{ find_running_mon_container.results }}" @@ -110,8 +111,8 @@ - containerized_deployment | bool - 
item.stdout_lines | default([]) | length > 0 - - name: set_fact _container_exec_cmd - set_fact: + - name: Set_fact _container_exec_cmd + ansible.builtin.set_fact: _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_facts']['hostname'] }}" when: - containerized_deployment | bool @@ -119,11 +120,11 @@ # this task shouldn't run in a rolling_update situation # because it blindly picks a mon, which may be down because # of the rolling update - - name: get current fsid if cluster is already running - command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fsid" + - name: Get current fsid if cluster is already running + ansible.builtin.command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fsid" changed_when: false failed_when: false - check_mode: no + check_mode: false register: current_fsid run_once: true delegate_to: "{{ groups[mon_group_name][0] if running_mon is undefined else running_mon }}" @@ -132,118 +133,120 @@ # set this as a default when performing a rolling_update # so the rest of the tasks here will succeed -- name: set_fact current_fsid rc 1 - set_fact: +- name: Set_fact current_fsid rc 1 + ansible.builtin.set_fact: current_fsid: rc: 1 when: rolling_update | bool or groups.get(mon_group_name, []) | length == 0 -- name: get current fsid - command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid" +- name: Get current fsid + ansible.builtin.command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid" register: rolling_update_fsid delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}" until: rolling_update_fsid is succeeded + changed_when: false when: - rolling_update | bool - groups.get(mon_group_name, []) | length > 0 -- name: set_fact fsid - set_fact: +- name: Set_fact fsid + ansible.builtin.set_fact: fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}" when: - rolling_update | bool - groups.get(mon_group_name, []) | length > 0 -- name: set_fact fsid from current_fsid - set_fact: +- name: Set_fact fsid from current_fsid + ansible.builtin.set_fact: fsid: "{{ current_fsid.stdout }}" run_once: true when: current_fsid.rc == 0 -- name: fsid related tasks +- name: Fsid related tasks when: - generate_fsid | bool - current_fsid.rc != 0 - not rolling_update | bool block: - - name: generate cluster fsid - command: "{{ hostvars[groups[mon_group_name][0]]['discovered_interpreter_python'] }} -c 'import uuid; print(str(uuid.uuid4()))'" + - name: Generate cluster fsid + ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['discovered_interpreter_python'] }} -c 'import uuid; print(str(uuid.uuid4()))'" register: cluster_uuid delegate_to: "{{ groups[mon_group_name][0] }}" + changed_when: false run_once: true - - name: set_fact fsid - set_fact: + - name: Set_fact fsid + ansible.builtin.set_fact: fsid: "{{ cluster_uuid.stdout }}" -- name: import_tasks devices.yml - import_tasks: devices.yml +- name: Import_tasks devices.yml + ansible.builtin.import_tasks: devices.yml when: inventory_hostname in groups.get(osd_group_name, []) -- name: check if the ceph conf exists - 
stat: +- name: Check if the ceph conf exists + ansible.builtin.stat: path: '/etc/ceph/{{ cluster }}.conf' register: ceph_conf -- name: set default osd_pool_default_crush_rule fact - set_fact: +- name: Set default osd_pool_default_crush_rule fact + ansible.builtin.set_fact: osd_pool_default_crush_rule: "{{ ceph_osd_pool_default_crush_rule }}" -- name: get default crush rule value from ceph configuration +- name: Get default crush rule value from ceph configuration + when: ceph_conf.stat.exists | bool block: - &read-osd-pool-default-crush-rule - name: read osd pool default crush rule - command: grep 'osd pool default crush rule' /etc/ceph/{{ cluster }}.conf + name: Read osd pool default crush rule + ansible.builtin.command: grep 'osd pool default crush rule' /etc/ceph/{{ cluster }}.conf register: crush_rule_variable changed_when: false - check_mode: no + check_mode: false failed_when: crush_rule_variable.rc not in (0, 1) - &set-osd-pool-default-crush-rule-fact - name: set osd_pool_default_crush_rule fact - set_fact: + name: Set osd_pool_default_crush_rule fact + ansible.builtin.set_fact: osd_pool_default_crush_rule: "{{ crush_rule_variable.stdout.split(' = ')[1] }}" when: crush_rule_variable.rc == 0 - when: ceph_conf.stat.exists | bool -- name: get default crush rule value from running monitor ceph configuration - block: - - <<: *read-osd-pool-default-crush-rule - delegate_to: "{{ running_mon }}" - - *set-osd-pool-default-crush-rule-fact +- name: Get default crush rule value from running monitor ceph configuration when: - running_mon is defined - not ceph_conf.stat.exists | bool + block: + - <<: *read-osd-pool-default-crush-rule # noqa: name[casing] + delegate_to: "{{ running_mon }}" + - *set-osd-pool-default-crush-rule-fact -- name: import_tasks set_monitor_address.yml - import_tasks: set_monitor_address.yml +- name: Import_tasks set_monitor_address.yml + ansible.builtin.import_tasks: set_monitor_address.yml when: groups.get(mon_group_name, []) | length > 0 -- name: import_tasks set_radosgw_address.yml - include_tasks: set_radosgw_address.yml +- name: Import_tasks set_radosgw_address.yml + ansible.builtin.include_tasks: set_radosgw_address.yml when: inventory_hostname in groups.get(rgw_group_name, []) -- name: set_fact use_new_ceph_iscsi package or old ceph-iscsi-config/cli - set_fact: - use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}" +- name: Set_fact use_new_ceph_iscsi package or old ceph-iscsi-config/cli + ansible.builtin.set_fact: + use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}" when: iscsi_gw_group_name in group_names -- name: set_fact ceph_run_cmd - set_fact: +- name: Set_fact ceph_run_cmd + ansible.builtin.set_fact: ceph_run_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}" delegate_to: "{{ item }}" - delegate_facts: True - run_once: True + delegate_facts: true + run_once: true with_items: - "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}" - "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] 
}}" - "{{ groups[client_group_name] if groups[client_group_name] | default([]) | length > 0 else [] }}" -- name: set_fact ceph_admin_command - set_fact: +- name: Set_fact ceph_admin_command + ansible.builtin.set_fact: ceph_admin_command: "{{ hostvars[item]['ceph_run_cmd'] }} -n client.admin -k /etc/ceph/{{ cluster }}.client.admin.keyring" delegate_to: "{{ item }}" - delegate_facts: True - run_once: True + delegate_facts: true + run_once: true with_items: - "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}" - "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] }}" diff --git a/roles/ceph-facts/tasks/get_def_crush_rule_name.yml b/roles/ceph-facts/tasks/get_def_crush_rule_name.yml index 70ba069501..55d3c7792b 100644 --- a/roles/ceph-facts/tasks/get_def_crush_rule_name.yml +++ b/roles/ceph-facts/tasks/get_def_crush_rule_name.yml @@ -1,5 +1,5 @@ --- -- name: get current default crush rule details +- name: Get current default crush rule details ceph_crush_rule: cluster: "{{ cluster }}" state: info @@ -10,9 +10,9 @@ delegate_to: "{{ delegated_node | default(groups[mon_group_name][0]) }}" run_once: true -- name: get current default crush rule name - set_fact: +- name: Get current default crush rule name + ansible.builtin.set_fact: ceph_osd_pool_default_crush_rule_name: "{{ item.rule_name }}" with_items: "{{ default_crush_rule_details.stdout | default('{}', True) | from_json }}" - run_once: True + run_once: true when: item.rule_id | int == osd_pool_default_crush_rule | int diff --git a/roles/ceph-facts/tasks/grafana.yml b/roles/ceph-facts/tasks/grafana.yml index 90c01bfb7e..df964dedb8 100644 --- a/roles/ceph-facts/tasks/grafana.yml +++ b/roles/ceph-facts/tasks/grafana.yml @@ -1,5 +1,5 @@ -- name: set grafana_server_addr fact - ipv4 - set_fact: +- name: Set grafana_server_addr fact - ipv4 + ansible.builtin.set_fact: grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(grafana_network.split(',')) | first }}" when: - groups.get(monitoring_group_name, []) | length > 0 @@ -7,8 +7,8 @@ - dashboard_enabled | bool - inventory_hostname in groups[monitoring_group_name] -- name: set grafana_server_addr fact - ipv6 - set_fact: +- name: Set grafana_server_addr fact - ipv6 + ansible.builtin.set_fact: grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ansible.utils.ipwrap }}" when: - groups.get(monitoring_group_name, []) | length > 0 @@ -16,8 +16,8 @@ - dashboard_enabled | bool - inventory_hostname in groups[monitoring_group_name] -- name: set grafana_server_addrs fact - ipv4 - set_fact: +- name: Set grafana_server_addrs fact - ipv4 + ansible.builtin.set_fact: grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(grafana_network.split(',')) | first]) | unique }}" with_items: "{{ groups.get(monitoring_group_name, []) }}" when: @@ -25,8 +25,8 @@ - ip_version == 'ipv4' - dashboard_enabled | bool -- name: set grafana_server_addrs fact - ipv6 - set_fact: +- name: Set grafana_server_addrs fact - ipv6 + ansible.builtin.set_fact: grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ansible.utils.ipwrap]) | unique }}" with_items: "{{ groups.get(monitoring_group_name, []) }}" when: diff --git 
a/roles/ceph-facts/tasks/main.yml b/roles/ceph-facts/tasks/main.yml index 48395f8571..ba58f84b9c 100644 --- a/roles/ceph-facts/tasks/main.yml +++ b/roles/ceph-facts/tasks/main.yml @@ -1,7 +1,7 @@ --- -- name: include_tasks convert_grafana_server_group_name.yml - include_tasks: convert_grafana_server_group_name.yml +- name: Include_tasks convert_grafana_server_group_name.yml + ansible.builtin.include_tasks: convert_grafana_server_group_name.yml when: groups.get((grafana_server_group_name|default('grafana-server')), []) | length > 0 -- name: include facts.yml - include_tasks: facts.yml +- name: Include facts.yml + ansible.builtin.include_tasks: facts.yml diff --git a/roles/ceph-facts/tasks/set_monitor_address.yml b/roles/ceph-facts/tasks/set_monitor_address.yml index b1cb346675..8f3aa57b5f 100644 --- a/roles/ceph-facts/tasks/set_monitor_address.yml +++ b/roles/ceph-facts/tasks/set_monitor_address.yml @@ -1,7 +1,7 @@ --- -- name: set_fact _monitor_addresses to monitor_address_block ipv4 - set_fact: - _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}" +- name: Set_fact _monitor_addresses to monitor_address_block ipv4 + ansible.builtin.set_fact: + _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first}] }}" with_items: "{{ groups.get(mon_group_name, []) }}" when: - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list" @@ -9,9 +9,9 @@ - hostvars[item]['monitor_address_block'] != 'subnet' - ip_version == 'ipv4' -- name: set_fact _monitor_addresses to monitor_address_block ipv6 - set_fact: - _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ansible.utils.ipwrap }] }}" +- name: Set_fact _monitor_addresses to monitor_address_block ipv6 + ansible.builtin.set_fact: + _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ansible.utils.ipwrap}] }}" with_items: "{{ groups.get(mon_group_name, []) }}" when: - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list" @@ -19,18 +19,18 @@ - hostvars[item]['monitor_address_block'] != 'subnet' - ip_version == 'ipv6' -- name: set_fact _monitor_addresses to monitor_address - set_fact: - _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ansible.utils.ipwrap}] }}" +- name: Set_fact _monitor_addresses to monitor_address + ansible.builtin.set_fact: + _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['monitor_address'] | ansible.utils.ipwrap}] }}" with_items: "{{ groups.get(mon_group_name, []) }}" when: - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list" - hostvars[item]['monitor_address'] is defined - hostvars[item]['monitor_address'] != 'x.x.x.x' -- name: set_fact _monitor_addresses to monitor_interface - ipv4 - set_fact: - _monitor_addresses: 
"{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ansible.utils.ipwrap }] }}" +- name: Set_fact _monitor_addresses to monitor_interface - ipv4 + ansible.builtin.set_fact: + _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface'] | replace('-', '_'))][ip_version]['address'] | ansible.utils.ipwrap}] }}" with_items: "{{ groups.get(mon_group_name, []) }}" when: - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list" @@ -39,9 +39,9 @@ - hostvars[item]['monitor_address'] | default('x.x.x.x') == 'x.x.x.x' - hostvars[item]['monitor_interface'] | default('interface') != 'interface' -- name: set_fact _monitor_addresses to monitor_interface - ipv6 - set_fact: - _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ansible.utils.ipwrap }] }}" +- name: Set_fact _monitor_addresses to monitor_interface - ipv6 + ansible.builtin.set_fact: + _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface'] | replace('-', '_'))][ip_version][0]['address'] | ansible.utils.ipwrap}] }}" with_items: "{{ groups.get(mon_group_name, []) }}" when: - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list" @@ -50,10 +50,10 @@ - hostvars[item]['monitor_address'] | default('x.x.x.x') == 'x.x.x.x' - hostvars[item]['monitor_interface'] | default('interface') != 'interface' -- name: set_fact _current_monitor_address - set_fact: +- name: Set_fact _current_monitor_address + ansible.builtin.set_fact: _current_monitor_address: "{{ item.addr }}" with_items: "{{ _monitor_addresses }}" when: - (inventory_hostname == item.name and not rolling_update | default(False) | bool) - or (rolling_update | default(False) | bool and item.name == groups.get(mon_group_name, [])[0]) \ No newline at end of file + or (rolling_update | default(False) | bool and item.name == groups.get(mon_group_name, [])[0]) diff --git a/roles/ceph-facts/tasks/set_radosgw_address.yml b/roles/ceph-facts/tasks/set_radosgw_address.yml index 7c9cddbf11..f862b9f331 100644 --- a/roles/ceph-facts/tasks/set_radosgw_address.yml +++ b/roles/ceph-facts/tasks/set_radosgw_address.yml @@ -1,52 +1,52 @@ --- -- name: dashboard related tasks +- name: Dashboard related tasks when: ceph_dashboard_call_item is defined block: - - name: set current radosgw_address_block, radosgw_address, radosgw_interface from node "{{ ceph_dashboard_call_item }}" - set_fact: + - name: Set current radosgw_address_block, radosgw_address, radosgw_interface from node "{{ ceph_dashboard_call_item }}" + ansible.builtin.set_fact: radosgw_address_block: "{{ hostvars[ceph_dashboard_call_item]['radosgw_address_block'] | default(radosgw_address_block) }}" radosgw_address: "{{ hostvars[ceph_dashboard_call_item]['radosgw_address'] | default(radosgw_address) }}" radosgw_interface: "{{ hostvars[ceph_dashboard_call_item]['radosgw_interface'] | default(radosgw_interface) }}" -- name: set_fact _radosgw_address to radosgw_address_block ipv4 - set_fact: +- name: Set_fact _radosgw_address to radosgw_address_block ipv4 + ansible.builtin.set_fact: _radosgw_address: "{{ 
hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}" when: - radosgw_address_block is defined - radosgw_address_block != 'subnet' - ip_version == 'ipv4' -- name: set_fact _radosgw_address to radosgw_address_block ipv6 - set_fact: +- name: Set_fact _radosgw_address to radosgw_address_block ipv6 + ansible.builtin.set_fact: _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ansible.utils.ipwrap }}" when: - radosgw_address_block is defined - radosgw_address_block != 'subnet' - ip_version == 'ipv6' -- name: set_fact _radosgw_address to radosgw_address - set_fact: +- name: Set_fact _radosgw_address to radosgw_address + ansible.builtin.set_fact: _radosgw_address: "{{ radosgw_address | ansible.utils.ipwrap }}" when: - radosgw_address is defined - radosgw_address != 'x.x.x.x' -- name: tasks for radosgw interface +- name: Tasks for radosgw interface when: - radosgw_address_block == 'subnet' - radosgw_address == 'x.x.x.x' - radosgw_interface != 'interface' block: - - name: set_fact _interface - set_fact: + - name: Set_fact _interface + ansible.builtin.set_fact: _interface: "{{ (hostvars[item]['radosgw_interface'] | replace('-', '_')) }}" loop: "{{ groups.get(rgw_group_name, []) }}" delegate_to: "{{ item }}" delegate_facts: true run_once: true - - name: set_fact _radosgw_address to radosgw_interface - ipv4 - set_fact: + - name: Set_fact _radosgw_address to radosgw_interface - ipv4 + ansible.builtin.set_fact: _radosgw_address: "{{ hostvars[item]['ansible_facts'][hostvars[item]['_interface']][ip_version]['address'] }}" loop: "{{ groups.get(rgw_group_name, []) }}" delegate_to: "{{ item }}" @@ -54,8 +54,8 @@ run_once: true when: ip_version == 'ipv4' - - name: set_fact _radosgw_address to radosgw_interface - ipv6 - set_fact: + - name: Set_fact _radosgw_address to radosgw_interface - ipv6 + ansible.builtin.set_fact: _radosgw_address: "{{ hostvars[item]['ansible_facts'][hostvars[item]['_interface']][ip_version][0]['address'] | ansible.utils.ipwrap }}" loop: "{{ groups.get(rgw_group_name, []) }}" delegate_to: "{{ item }}" @@ -63,18 +63,18 @@ run_once: true when: ip_version == 'ipv6' -- name: rgw_instances +- name: Rgw_instances when: - ceph_dashboard_call_item is defined or inventory_hostname in groups.get(rgw_group_name, []) block: - - name: reset rgw_instances (workaround) - set_fact: + - name: Reset rgw_instances (workaround) + ansible.builtin.set_fact: rgw_instances: [] - - name: set_fact rgw_instances - set_fact: - rgw_instances: "{{ rgw_instances|default([]) | union([{'instance_name': 'rgw' + item|string, 'radosgw_address': hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['_radosgw_address'], 'radosgw_frontend_port': radosgw_frontend_port|int + item|int }]) }}" - with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }} + - name: Set_fact rgw_instances + ansible.builtin.set_fact: + rgw_instances: "{{ rgw_instances | default([]) | union([{'instance_name': 'rgw' + item | string, 'radosgw_address': hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['_radosgw_address'], 'radosgw_frontend_port': radosgw_frontend_port | int + item | int}]) }}" + with_sequence: start=0 end={{ radosgw_num_instances | int - 1 }} delegate_to: "{{ ceph_dashboard_call_item if ceph_dashboard_call_item is defined else inventory_hostname }}" - delegate_facts: "{{ 
true if ceph_dashboard_call_item is defined else false }}" + delegate_facts: true diff --git a/roles/ceph-fetch-keys/defaults/main.yml b/roles/ceph-fetch-keys/defaults/main.yml index f8ac0ba5fa..59310811c3 100644 --- a/roles/ceph-fetch-keys/defaults/main.yml +++ b/roles/ceph-fetch-keys/defaults/main.yml @@ -8,4 +8,3 @@ dummy: fetch_directory: fetch/ - diff --git a/roles/ceph-fetch-keys/meta/main.yml b/roles/ceph-fetch-keys/meta/main.yml index 7c7cd2d797..2cb63c8aec 100644 --- a/roles/ceph-fetch-keys/meta/main.yml +++ b/roles/ceph-fetch-keys/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Andrew Schoen description: Fetches ceph keys from monitors. license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-fetch-keys/tasks/main.yml b/roles/ceph-fetch-keys/tasks/main.yml index 6f56738831..dd19e500d4 100644 --- a/roles/ceph-fetch-keys/tasks/main.yml +++ b/roles/ceph-fetch-keys/tasks/main.yml @@ -1,21 +1,22 @@ --- -- name: lookup keys in /etc/ceph - shell: ls -1 /etc/ceph/*.keyring +- name: Lookup keys in /etc/ceph + ansible.builtin.shell: ls -1 /etc/ceph/*.keyring changed_when: false register: ceph_keys -- name: create a local fetch directory if it does not exist - file: +- name: Create a local fetch directory if it does not exist + ansible.builtin.file: path: "{{ fetch_directory }}" state: directory + mode: "0755" delegate_to: localhost become: false -- name: "copy ceph user and bootstrap keys to the ansible server in {{ fetch_directory }}/{{ fsid }}/" - fetch: +- name: Copy ceph user and bootstrap keys to the ansible server + ansible.builtin.fetch: src: "{{ item }}" dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}" - flat: yes + flat: true fail_on_missing: false run_once: true with_items: diff --git a/roles/ceph-grafana/meta/main.yml b/roles/ceph-grafana/meta/main.yml index 76a6bd6fde..1d346cda0c 100644 --- a/roles/ceph-grafana/meta/main.yml +++ b/roles/ceph-grafana/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Boris Ranto description: Configures Grafana for Ceph Dashboard license: Apache - min_ansible_version: 2.4 + min_ansible_version: "2.4" platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-grafana/tasks/configure_grafana.yml b/roles/ceph-grafana/tasks/configure_grafana.yml index 2cb4ed7f48..dad9c7729e 100644 --- a/roles/ceph-grafana/tasks/configure_grafana.yml +++ b/roles/ceph-grafana/tasks/configure_grafana.yml @@ -1,8 +1,8 @@ --- -- name: install ceph-grafana-dashboards package on RedHat or SUSE - package: +- name: Install ceph-grafana-dashboards package on RedHat or SUSE + ansible.builtin.package: name: ceph-grafana-dashboards - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded when: @@ -10,22 +10,22 @@ - ansible_facts['os_family'] in ['RedHat', 'Suse'] tags: package-install -- name: make sure grafana is down - service: +- name: Make sure grafana is down + ansible.builtin.service: name: grafana-server state: stopped -- name: wait for grafana to be stopped - wait_for: +- name: Wait for grafana to be stopped + ansible.builtin.wait_for: host: '{{ grafana_server_addr if ip_version == "ipv4" else grafana_server_addr[1:-1] }}' port: '{{ grafana_port }}' state: stopped -- name: make sure grafana configuration directories exist - file: +- name: Make sure 
grafana configuration directories exist + ansible.builtin.file: path: "{{ item }}" state: directory - recurse: yes + recurse: true owner: "{{ grafana_uid }}" group: "{{ grafana_uid }}" with_items: @@ -34,82 +34,84 @@ - "/etc/grafana/provisioning/dashboards" - "/etc/grafana/provisioning/notifiers" -- name: download ceph grafana dashboards - get_url: +- name: Download ceph grafana dashboards + ansible.builtin.get_url: url: "https://raw.githubusercontent.com/ceph/ceph/{{ grafana_dashboard_version }}/monitoring/ceph-mixin/dashboards_out/{{ item }}" dest: "/etc/grafana/dashboards/ceph-dashboard/{{ item }}" + mode: "0644" with_items: "{{ grafana_dashboard_files }}" when: - not containerized_deployment | bool - not ansible_facts['os_family'] in ['RedHat', 'Suse'] -- name: write grafana.ini +- name: Write grafana.ini openstack.config_template.config_template: src: grafana.ini.j2 dest: /etc/grafana/grafana.ini owner: "{{ grafana_uid }}" group: "{{ grafana_uid }}" - mode: 0640 + mode: "0640" config_type: ini config_overrides: "{{ grafana_conf_overrides }}" -- name: write datasources provisioning config file - template: +- name: Write datasources provisioning config file + ansible.builtin.template: src: datasources-ceph-dashboard.yml.j2 dest: /etc/grafana/provisioning/datasources/ceph-dashboard.yml owner: "{{ grafana_uid }}" group: "{{ grafana_uid }}" - mode: 0640 + mode: "0640" - name: Write dashboards provisioning config file - template: + ansible.builtin.template: src: dashboards-ceph-dashboard.yml.j2 dest: /etc/grafana/provisioning/dashboards/ceph-dashboard.yml owner: "{{ grafana_uid }}" group: "{{ grafana_uid }}" - mode: 0640 + mode: "0640" when: not containerized_deployment | bool -- name: copy grafana SSL certificate file - copy: +- name: Copy grafana SSL certificate file + ansible.builtin.copy: src: "{{ grafana_crt }}" dest: "/etc/grafana/ceph-dashboard.crt" owner: "{{ grafana_uid }}" group: "{{ grafana_uid }}" - mode: 0640 + mode: "0640" remote_src: "{{ dashboard_tls_external | bool }}" when: - grafana_crt | length > 0 - dashboard_protocol == "https" -- name: copy grafana SSL certificate key - copy: +- name: Copy grafana SSL certificate key + ansible.builtin.copy: src: "{{ grafana_key }}" dest: "/etc/grafana/ceph-dashboard.key" owner: "{{ grafana_uid }}" group: "{{ grafana_uid }}" - mode: 0440 + mode: "0440" remote_src: "{{ dashboard_tls_external | bool }}" when: - grafana_key | length > 0 - dashboard_protocol == "https" -- name: generate a Self Signed OpenSSL certificate for dashboard - shell: | +- name: Generate a Self Signed OpenSSL certificate for dashboard + ansible.builtin.shell: | test -f /etc/grafana/ceph-dashboard.key -a -f /etc/grafana/ceph-dashboard.crt || \ (openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-grafana' -days 3650 -keyout /etc/grafana/ceph-dashboard.key -out /etc/grafana/ceph-dashboard.crt -extensions v3_ca && \ chown {{ grafana_uid }}:{{ grafana_uid }} /etc/grafana/ceph-dashboard.key /etc/grafana/ceph-dashboard.crt) + changed_when: false when: - dashboard_protocol == "https" - grafana_key | length == 0 or grafana_crt | length == 0 -- name: enable and start grafana - service: +- name: Enable and start grafana + ansible.builtin.service: name: grafana-server state: restarted enabled: true -- name: wait for grafana to start - wait_for: +- name: Wait for grafana to start + ansible.builtin.wait_for: host: '{{ grafana_server_addr if ip_version == "ipv4" else grafana_server_addr[1:-1] }}' port: '{{ grafana_port }}' diff --git a/roles/ceph-grafana/tasks/main.yml 
b/roles/ceph-grafana/tasks/main.yml index d125a51028..2e4903684f 100644 --- a/roles/ceph-grafana/tasks/main.yml +++ b/roles/ceph-grafana/tasks/main.yml @@ -1,6 +1,6 @@ --- -- name: include setup_container.yml - include_tasks: setup_container.yml +- name: Include setup_container.yml + ansible.builtin.include_tasks: setup_container.yml -- name: include configure_grafana.yml - include_tasks: configure_grafana.yml +- name: Include configure_grafana.yml + ansible.builtin.include_tasks: configure_grafana.yml diff --git a/roles/ceph-grafana/tasks/setup_container.yml b/roles/ceph-grafana/tasks/setup_container.yml index 666eff780b..f410651b49 100644 --- a/roles/ceph-grafana/tasks/setup_container.yml +++ b/roles/ceph-grafana/tasks/setup_container.yml @@ -1,6 +1,6 @@ --- -- name: create /etc/grafana and /var/lib/grafana - file: +- name: Create /etc/grafana and /var/lib/grafana + ansible.builtin.file: path: "{{ item }}" state: directory owner: "{{ grafana_uid }}" @@ -10,13 +10,13 @@ - /etc/grafana - /var/lib/grafana -- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml -- name: start the grafana-server service - systemd: +- name: Start the grafana-server service + ansible.builtin.systemd: name: grafana-server state: started - enabled: yes - daemon_reload: yes + enabled: true + daemon_reload: true failed_when: false diff --git a/roles/ceph-grafana/tasks/systemd.yml b/roles/ceph-grafana/tasks/systemd.yml index d51387058f..52c40b6efc 100644 --- a/roles/ceph-grafana/tasks/systemd.yml +++ b/roles/ceph-grafana/tasks/systemd.yml @@ -1,8 +1,8 @@ --- -- name: ship systemd service - template: +- name: Ship systemd service + ansible.builtin.template: src: grafana-server.service.j2 dest: "/etc/systemd/system/grafana-server.service" owner: root group: root - mode: 0644 + mode: "0644" diff --git a/roles/ceph-handler/handlers/main.yml b/roles/ceph-handler/handlers/main.yml index dd3ef29639..b49276ead5 100644 --- a/roles/ceph-handler/handlers/main.yml +++ b/roles/ceph-handler/handlers/main.yml @@ -1,72 +1,72 @@ --- -- name: handlers +- name: Handlers when: - - not rolling_update | bool - - not docker2podman | default(False) | bool + - not rolling_update | bool + - not docker2podman | default(False) | bool block: - - name: make tempdir for scripts - tempfile: + - name: Make tempdir for scripts + ansible.builtin.tempfile: state: directory prefix: ceph_ansible listen: - - "restart ceph mons" - - "restart ceph osds" - - "restart ceph mdss" - - "restart ceph rgws" - - "restart ceph nfss" - - "restart ceph rbdmirrors" - - "restart ceph mgrs" + - "Restart ceph mons" + - "Restart ceph osds" + - "Restart ceph mdss" + - "Restart ceph rgws" + - "Restart ceph nfss" + - "Restart ceph rbdmirrors" + - "Restart ceph mgrs" register: tmpdirpath when: tmpdirpath is not defined or tmpdirpath.path is not defined or tmpdirpath.state=="absent" - - name: mons handler - include_tasks: handler_mons.yml + - name: Mons handler + ansible.builtin.include_tasks: handler_mons.yml when: mon_group_name in group_names - listen: "restart ceph mons" + listen: "Restart ceph mons" - - name: osds handler - include_tasks: handler_osds.yml + - name: Osds handler + ansible.builtin.include_tasks: handler_osds.yml when: osd_group_name in group_names - listen: "restart ceph osds" + listen: "Restart ceph osds" - - name: mdss handler - include_tasks: handler_mdss.yml + - name: Mdss handler + ansible.builtin.include_tasks: handler_mdss.yml when: mds_group_name in group_names - 
listen: "restart ceph mdss" + listen: "Restart ceph mdss" - - name: rgws handler - include_tasks: handler_rgws.yml + - name: Rgws handler + ansible.builtin.include_tasks: handler_rgws.yml when: rgw_group_name in group_names - listen: "restart ceph rgws" + listen: "Restart ceph rgws" - - name: nfss handler - include_tasks: handler_nfss.yml + - name: Nfss handler + ansible.builtin.include_tasks: handler_nfss.yml when: nfs_group_name in group_names - listen: "restart ceph nfss" + listen: "Restart ceph nfss" - - name: rbdmirrors handler - include_tasks: handler_rbdmirrors.yml + - name: Rbdmirrors handler + ansible.builtin.include_tasks: handler_rbdmirrors.yml when: rbdmirror_group_name in group_names - listen: "restart ceph rbdmirrors" + listen: "Restart ceph rbdmirrors" - - name: mgrs handler - include_tasks: handler_mgrs.yml + - name: Mgrs handler + ansible.builtin.include_tasks: handler_mgrs.yml when: mgr_group_name in group_names - listen: "restart ceph mgrs" + listen: "Restart ceph mgrs" - - name: tcmu-runner handler - include_tasks: handler_tcmu_runner.yml + - name: Tcmu-runner handler + ansible.builtin.include_tasks: handler_tcmu_runner.yml when: iscsi_gw_group_name in group_names - listen: "restart ceph tcmu-runner" + listen: "Restart ceph tcmu-runner" - - name: rbd-target-api and rbd-target-gw handler - include_tasks: handler_rbd_target_api_gw.yml + - name: Rbd-target-api and rbd-target-gw handler + ansible.builtin.include_tasks: handler_rbd_target_api_gw.yml when: iscsi_gw_group_name in group_names - listen: "restart ceph rbd-target-api-gw" + listen: "Restart ceph rbd-target-api-gw" - - name: ceph crash handler - include_tasks: handler_crash.yml - listen: "restart ceph crash" + - name: Ceph crash handler + ansible.builtin.include_tasks: handler_crash.yml + listen: "Restart ceph crash" when: - inventory_hostname in groups.get(mon_group_name, []) or inventory_hostname in groups.get(mgr_group_name, []) @@ -75,17 +75,17 @@ or inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(rbdmirror_group_name, []) - - name: remove tempdir for scripts - file: + - name: Remove tempdir for scripts + ansible.builtin.file: path: "{{ tmpdirpath.path }}" state: absent listen: - - "restart ceph mons" - - "restart ceph osds" - - "restart ceph mdss" - - "restart ceph rgws" - - "restart ceph nfss" - - "restart ceph rbdmirrors" - - "restart ceph mgrs" + - "Restart ceph mons" + - "Restart ceph osds" + - "Restart ceph mdss" + - "Restart ceph rgws" + - "Restart ceph nfss" + - "Restart ceph rbdmirrors" + - "Restart ceph mgrs" register: tmpdirpath when: tmpdirpath.path is defined diff --git a/roles/ceph-handler/meta/main.yml b/roles/ceph-handler/meta/main.yml index 58138fa87b..fe745e01a2 100644 --- a/roles/ceph-handler/meta/main.yml +++ b/roles/ceph-handler/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Contains handlers for Ceph services license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-handler/tasks/check_running_cluster.yml b/roles/ceph-handler/tasks/check_running_cluster.yml index 633536d035..62af95a53f 100644 --- a/roles/ceph-handler/tasks/check_running_cluster.yml +++ b/roles/ceph-handler/tasks/check_running_cluster.yml @@ -1,8 +1,8 @@ --- -- name: include check_running_containers.yml - include_tasks: check_running_containers.yml +- name: Include check_running_containers.yml + ansible.builtin.include_tasks: 
check_running_containers.yml when: containerized_deployment | bool -- name: include check_socket_non_container.yml - include_tasks: check_socket_non_container.yml +- name: Include check_socket_non_container.yml + ansible.builtin.include_tasks: check_socket_non_container.yml when: not containerized_deployment | bool diff --git a/roles/ceph-handler/tasks/check_running_containers.yml b/roles/ceph-handler/tasks/check_running_containers.yml index bcb98f857b..1775120e2a 100644 --- a/roles/ceph-handler/tasks/check_running_containers.yml +++ b/roles/ceph-handler/tasks/check_running_containers.yml @@ -1,94 +1,94 @@ --- -- name: check for a mon container - command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'" +- name: Check for a mon container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'" register: ceph_mon_container_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(mon_group_name, []) -- name: check for an osd container - command: "{{ container_binary }} ps -q --filter='name=ceph-osd'" +- name: Check for an osd container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-osd'" register: ceph_osd_container_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(osd_group_name, []) -- name: check for a mds container - command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'" +- name: Check for a mds container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'" register: ceph_mds_container_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(mds_group_name, []) -- name: check for a rgw container - command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'" +- name: Check for a rgw container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'" register: ceph_rgw_container_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(rgw_group_name, []) -- name: check for a mgr container - command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'" +- name: Check for a mgr container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'" register: ceph_mgr_container_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(mgr_group_name, []) -- name: check for a rbd mirror container - command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'" +- name: Check for a rbd mirror container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'" register: ceph_rbd_mirror_container_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(rbdmirror_group_name, []) -- name: check for a nfs container - command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'" +- name: Check for a nfs container + ansible.builtin.command: "{{ 
container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'" register: ceph_nfs_container_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(nfs_group_name, []) -- name: check for a tcmu-runner container - command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'" +- name: Check for a tcmu-runner container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'" register: ceph_tcmu_runner_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(iscsi_gw_group_name, []) -- name: check for a rbd-target-api container - command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'" +- name: Check for a rbd-target-api container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'" register: ceph_rbd_target_api_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(iscsi_gw_group_name, []) -- name: check for a rbd-target-gw container - command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'" +- name: Check for a rbd-target-gw container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'" register: ceph_rbd_target_gw_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(iscsi_gw_group_name, []) -- name: check for a ceph-crash container - command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'" +- name: Check for a ceph-crash container + ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'" register: ceph_crash_container_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: - inventory_hostname in groups.get(mon_group_name, []) or inventory_hostname in groups.get(mgr_group_name, []) or inventory_hostname in groups.get(osd_group_name, []) or inventory_hostname in groups.get(mds_group_name, []) or inventory_hostname in groups.get(rgw_group_name, []) - or inventory_hostname in groups.get(rbdmirror_group_name, []) \ No newline at end of file + or inventory_hostname in groups.get(rbdmirror_group_name, []) diff --git a/roles/ceph-handler/tasks/check_socket_non_container.yml b/roles/ceph-handler/tasks/check_socket_non_container.yml index c80e53f2d5..79f2219826 100644 --- a/roles/ceph-handler/tasks/check_socket_non_container.yml +++ b/roles/ceph-handler/tasks/check_socket_non_container.yml @@ -1,27 +1,27 @@ --- -- name: find ceph mon socket - find: +- name: Find ceph mon socket + ansible.builtin.find: paths: ["{{ rbd_client_admin_socket_path }}"] - recurse: yes + recurse: true file_type: any patterns: "{{ cluster }}-mon*.asok" - use_regex: no + use_regex: false register: mon_socket_stat when: inventory_hostname in groups.get(mon_group_name, []) -- name: check if the ceph mon socket is in-use - command: grep -q {{ item.path }} /proc/net/unix +- name: Check if the ceph mon socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix changed_when: false failed_when: false - check_mode: no + check_mode: false register: mon_socket with_items: "{{ mon_socket_stat.files }}" when: - inventory_hostname in groups.get(mon_group_name, []) - mon_socket_stat.files | length > 0 -- name: remove ceph mon socket if exists and 
not used by a process - file: +- name: Remove ceph mon socket if exists and not used by a process + ansible.builtin.file: name: "{{ item.0.path }}" state: absent with_together: @@ -32,29 +32,29 @@ - mon_socket_stat.files | length > 0 - item.1.rc == 1 -- name: find ceph osd socket - find: +- name: Find ceph osd socket + ansible.builtin.find: paths: ["{{ rbd_client_admin_socket_path }}"] - recurse: yes + recurse: true file_type: any patterns: "{{ cluster }}-osd.*.asok" - use_regex: no + use_regex: false register: osd_socket_stat when: inventory_hostname in groups.get(osd_group_name, []) -- name: check if the ceph osd socket is in-use - command: grep -q {{ item.path }} /proc/net/unix +- name: Check if the ceph osd socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix changed_when: false failed_when: false - check_mode: no + check_mode: false register: osd_socket with_items: "{{ osd_socket_stat.files }}" when: - inventory_hostname in groups.get(osd_group_name, []) - osd_socket_stat.files | length > 0 -- name: remove ceph osd socket if exists and not used by a process - file: +- name: Remove ceph osd socket if exists and not used by a process + ansible.builtin.file: name: "{{ item.0.path }}" state: absent with_together: @@ -65,29 +65,29 @@ - osd_socket_stat.files | length > 0 - item.1.rc == 1 -- name: find ceph osd socket - find: +- name: Find ceph osd socket + ansible.builtin.find: paths: ["{{ rbd_client_admin_socket_path }}"] - recurse: yes + recurse: true file_type: any patterns: "{{ cluster }}-mds*.asok" - use_regex: no + use_regex: false register: mds_socket_stat when: inventory_hostname in groups.get(mds_group_name, []) -- name: check if the ceph mds socket is in-use - command: grep -q {{ item.path }} /proc/net/unix +- name: Check if the ceph mds socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix changed_when: false failed_when: false - check_mode: no + check_mode: false register: mds_socket with_items: "{{ mds_socket_stat.files }}" when: - inventory_hostname in groups.get(mds_group_name, []) - mds_socket_stat.files | length > 0 -- name: remove ceph mds socket if exists and not used by a process - file: +- name: Remove ceph mds socket if exists and not used by a process + ansible.builtin.file: name: "{{ item.0.path }}" state: absent with_together: @@ -98,29 +98,29 @@ - mds_socket_stat.files | length > 0 - item.1.rc == 1 -- name: find ceph rgw socket - find: +- name: Find ceph rgw socket + ansible.builtin.find: paths: ["{{ rbd_client_admin_socket_path }}"] - recurse: yes + recurse: true file_type: any patterns: "{{ cluster }}-client.rgw*.asok" - use_regex: no + use_regex: false register: rgw_socket_stat when: inventory_hostname in groups.get(rgw_group_name, []) -- name: check if the ceph rgw socket is in-use - command: grep -q {{ item.path }} /proc/net/unix +- name: Check if the ceph rgw socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix changed_when: false failed_when: false - check_mode: no + check_mode: false register: rgw_socket with_items: "{{ rgw_socket_stat.files }}" when: - inventory_hostname in groups.get(rgw_group_name, []) - rgw_socket_stat.files | length > 0 -- name: remove ceph rgw socket if exists and not used by a process - file: +- name: Remove ceph rgw socket if exists and not used by a process + ansible.builtin.file: name: "{{ item.0.path }}" state: absent with_together: @@ -131,29 +131,29 @@ - rgw_socket_stat.files | length > 0 - item.1.rc == 1 -- name: find ceph mgr socket - 
find: +- name: Find ceph mgr socket + ansible.builtin.find: paths: ["{{ rbd_client_admin_socket_path }}"] - recurse: yes + recurse: true file_type: any patterns: "{{ cluster }}-mgr*.asok" - use_regex: no + use_regex: false register: mgr_socket_stat when: inventory_hostname in groups.get(mgr_group_name, []) -- name: check if the ceph mgr socket is in-use - command: grep -q {{ item.path }} /proc/net/unix +- name: Check if the ceph mgr socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix changed_when: false failed_when: false - check_mode: no + check_mode: false register: mgr_socket with_items: "{{ mgr_socket_stat.files }}" when: - inventory_hostname in groups.get(mgr_group_name, []) - mgr_socket_stat.files | length > 0 -- name: remove ceph mgr socket if exists and not used by a process - file: +- name: Remove ceph mgr socket if exists and not used by a process + ansible.builtin.file: name: "{{ item.0.path }}" state: absent with_together: @@ -164,29 +164,29 @@ - mgr_socket_stat.files | length > 0 - item.1.rc == 1 -- name: find ceph rbd mirror socket - find: +- name: Find ceph rbd mirror socket + ansible.builtin.find: paths: ["{{ rbd_client_admin_socket_path }}"] - recurse: yes + recurse: true file_type: any patterns: "{{ cluster }}-client.rbd-mirror*.asok" - use_regex: no + use_regex: false register: rbd_mirror_socket_stat when: inventory_hostname in groups.get(rbdmirror_group_name, []) -- name: check if the ceph rbd mirror socket is in-use - command: grep -q {{ item.path }} /proc/net/unix +- name: Check if the ceph rbd mirror socket is in-use + ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix changed_when: false failed_when: false - check_mode: no + check_mode: false register: rbd_mirror_socket with_items: "{{ rbd_mirror_socket_stat.files }}" when: - inventory_hostname in groups.get(rbdmirror_group_name, []) - rbd_mirror_socket_stat.files | length > 0 -- name: remove ceph rbd mirror socket if exists and not used by a process - file: +- name: Remove ceph rbd mirror socket if exists and not used by a process + ansible.builtin.file: name: "{{ item.0.path }}" state: absent with_together: @@ -197,43 +197,43 @@ - rbd_mirror_socket_stat.files | length > 0 - item.1.rc == 1 -- name: check for a nfs ganesha pid - command: "pgrep ganesha.nfsd" +- name: Check for a nfs ganesha pid + ansible.builtin.command: "pgrep ganesha.nfsd" register: nfs_process changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(nfs_group_name, []) -- name: check for a tcmu-runner - command: "pgrep tcmu-runner" +- name: Check for a tcmu-runner + ansible.builtin.command: "pgrep tcmu-runner" register: ceph_tcmu_runner_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(iscsi_gw_group_name, []) -- name: check for a rbd-target-api - command: "pgrep rbd-target-api" +- name: Check for a rbd-target-api + ansible.builtin.command: "pgrep rbd-target-api" register: ceph_rbd_target_api_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(iscsi_gw_group_name, []) -- name: check for a rbd-target-gw - command: "pgrep name=rbd-target-gw" +- name: Check for a rbd-target-gw + ansible.builtin.command: "pgrep name=rbd-target-gw" register: ceph_rbd_target_gw_stat changed_when: false failed_when: false - check_mode: no + check_mode: false when: inventory_hostname in groups.get(iscsi_gw_group_name, []) -- name: 
check for a ceph-crash process - command: pgrep ceph-crash +- name: Check for a ceph-crash process + ansible.builtin.command: pgrep ceph-crash changed_when: false failed_when: false - check_mode: no + check_mode: false register: crash_process when: - inventory_hostname in groups.get(mon_group_name, []) @@ -241,4 +241,4 @@ or inventory_hostname in groups.get(osd_group_name, []) or inventory_hostname in groups.get(mds_group_name, []) or inventory_hostname in groups.get(rgw_group_name, []) - or inventory_hostname in groups.get(rbdmirror_group_name, []) \ No newline at end of file + or inventory_hostname in groups.get(rbdmirror_group_name, []) diff --git a/roles/ceph-handler/tasks/handler_crash.yml b/roles/ceph-handler/tasks/handler_crash.yml index b4039b10e6..44b049bcc0 100644 --- a/roles/ceph-handler/tasks/handler_crash.yml +++ b/roles/ceph-handler/tasks/handler_crash.yml @@ -1,18 +1,18 @@ --- -- name: set _crash_handler_called before restart - set_fact: - _crash_handler_called: True +- name: Set _crash_handler_called before restart + ansible.builtin.set_fact: + _crash_handler_called: true -- name: restart the ceph-crash service - systemd: +- name: Restart the ceph-crash service # noqa: ignore-errors + ansible.builtin.systemd: name: ceph-crash@{{ ansible_facts['hostname'] }} state: restarted - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true ignore_errors: true when: hostvars[inventory_hostname]['_crash_handler_called'] | default(False) | bool -- name: set _crash_handler_called after restart - set_fact: - _crash_handler_called: False +- name: Set _crash_handler_called after restart + ansible.builtin.set_fact: + _crash_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_mdss.yml b/roles/ceph-handler/tasks/handler_mdss.yml index 4043844f22..9e82d54835 100644 --- a/roles/ceph-handler/tasks/handler_mdss.yml +++ b/roles/ceph-handler/tasks/handler_mdss.yml @@ -1,27 +1,28 @@ --- -- name: set _mds_handler_called before restart - set_fact: - _mds_handler_called: True +- name: Set _mds_handler_called before restart + ansible.builtin.set_fact: + _mds_handler_called: true -- name: copy mds restart script - template: +- name: Copy mds restart script + ansible.builtin.template: src: restart_mds_daemon.sh.j2 dest: "{{ tmpdirpath.path }}/restart_mds_daemon.sh" owner: root group: root - mode: 0750 + mode: "0750" when: tmpdirpath.path is defined -- name: restart ceph mds daemon(s) - command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mds_daemon.sh +- name: Restart ceph mds daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mds_daemon.sh when: - hostvars[item]['handler_mds_status'] | default(False) | bool - hostvars[item]['_mds_handler_called'] | default(False) | bool - hostvars[item].tmpdirpath.path is defined with_items: "{{ groups[mds_group_name] }}" delegate_to: "{{ item }}" - run_once: True + changed_when: false + run_once: true -- name: set _mds_handler_called after restart - set_fact: - _mds_handler_called: False +- name: Set _mds_handler_called after restart + ansible.builtin.set_fact: + _mds_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_mgrs.yml b/roles/ceph-handler/tasks/handler_mgrs.yml index 35f1d40da5..7984cc54d7 100644 --- a/roles/ceph-handler/tasks/handler_mgrs.yml +++ b/roles/ceph-handler/tasks/handler_mgrs.yml @@ -1,27 +1,28 @@ --- -- name: set _mgr_handler_called before restart - set_fact: - _mgr_handler_called: True +- 
name: Set _mgr_handler_called before restart + ansible.builtin.set_fact: + _mgr_handler_called: true -- name: copy mgr restart script - template: +- name: Copy mgr restart script + ansible.builtin.template: src: restart_mgr_daemon.sh.j2 dest: "{{ tmpdirpath.path }}/restart_mgr_daemon.sh" owner: root group: root - mode: 0750 + mode: "0750" when: tmpdirpath.path is defined -- name: restart ceph mgr daemon(s) - command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mgr_daemon.sh +- name: Restart ceph mgr daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mgr_daemon.sh when: - hostvars[item]['handler_mgr_status'] | default(False) | bool - hostvars[item]['_mgr_handler_called'] | default(False) | bool - hostvars[item].tmpdirpath.path is defined with_items: "{{ groups[mgr_group_name] }}" delegate_to: "{{ item }}" - run_once: True + changed_when: false + run_once: true -- name: set _mgr_handler_called after restart - set_fact: - _mgr_handler_called: False +- name: Set _mgr_handler_called after restart + ansible.builtin.set_fact: + _mgr_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_mons.yml b/roles/ceph-handler/tasks/handler_mons.yml index 112eb13c8d..0c970527a4 100644 --- a/roles/ceph-handler/tasks/handler_mons.yml +++ b/roles/ceph-handler/tasks/handler_mons.yml @@ -2,21 +2,21 @@ # We only want to restart on hosts that have called the handler. # This var is set when he handler is called, and unset after the # restart to ensure only the correct hosts are restarted. -- name: set _mon_handler_called before restart - set_fact: - _mon_handler_called: True +- name: Set _mon_handler_called before restart + ansible.builtin.set_fact: + _mon_handler_called: true -- name: copy mon restart script - template: +- name: Copy mon restart script + ansible.builtin.template: src: restart_mon_daemon.sh.j2 dest: "{{ tmpdirpath.path }}/restart_mon_daemon.sh" owner: root group: root - mode: 0750 + mode: "0750" when: tmpdirpath.path is defined -- name: restart ceph mon daemon(s) - command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mon_daemon.sh +- name: Restart ceph mon daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mon_daemon.sh when: # We do not want to run these checks on initial deployment (`socket.rc == 0`) - hostvars[item]['handler_mon_status'] | default(False) | bool @@ -24,8 +24,9 @@ - hostvars[item].tmpdirpath.path is defined with_items: "{{ groups[mon_group_name] }}" delegate_to: "{{ item }}" - run_once: True + changed_when: false + run_once: true -- name: set _mon_handler_called after restart - set_fact: - _mon_handler_called: False +- name: Set _mon_handler_called after restart + ansible.builtin.set_fact: + _mon_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_nfss.yml b/roles/ceph-handler/tasks/handler_nfss.yml index 65df7b7566..dadfc1d8c6 100644 --- a/roles/ceph-handler/tasks/handler_nfss.yml +++ b/roles/ceph-handler/tasks/handler_nfss.yml @@ -1,27 +1,28 @@ --- -- name: set _nfs_handler_called before restart - set_fact: - _nfs_handler_called: True +- name: Set _nfs_handler_called before restart + ansible.builtin.set_fact: + _nfs_handler_called: true -- name: copy nfs restart script - template: +- name: Copy nfs restart script + ansible.builtin.template: src: restart_nfs_daemon.sh.j2 dest: "{{ tmpdirpath.path }}/restart_nfs_daemon.sh" owner: root group: root - mode: 0750 + mode: "0750" when: tmpdirpath.path 
is defined -- name: restart ceph nfs daemon(s) - command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh +- name: Restart ceph nfs daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh when: - hostvars[item]['handler_nfs_status'] | default(False) | bool - hostvars[item]['_nfs_handler_called'] | default(False) | bool - hostvars[item].tmpdirpath.path is defined with_items: "{{ groups[nfs_group_name] }}" delegate_to: "{{ item }}" - run_once: True + changed_when: false + run_once: true -- name: set _nfs_handler_called after restart - set_fact: - _nfs_handler_called: False +- name: Set _nfs_handler_called after restart + ansible.builtin.set_fact: + _nfs_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_osds.yml b/roles/ceph-handler/tasks/handler_osds.yml index cc62deecc0..6d2634f002 100644 --- a/roles/ceph-handler/tasks/handler_osds.yml +++ b/roles/ceph-handler/tasks/handler_osds.yml @@ -1,19 +1,19 @@ --- -- name: set_fact trigger_restart - set_fact: +- name: Set_fact trigger_restart + ansible.builtin.set_fact: trigger_restart: true loop: "{{ groups[osd_group_name] }}" when: hostvars[item]['handler_osd_status'] | default(False) | bool run_once: true -- name: osd handler +- name: Osd handler when: trigger_restart | default(False) | bool block: - - name: set _osd_handler_called before restart - set_fact: - _osd_handler_called: True + - name: Set _osd_handler_called before restart + ansible.builtin.set_fact: + _osd_handler_called: true - - name: unset noup flag + - name: Unset noup flag ceph_osd_flag: name: noup cluster: "{{ cluster }}" @@ -30,45 +30,45 @@ # This does not need to run during a rolling update as the playbook will # restart all OSDs using the tasks "start ceph osd" or # "restart containerized ceph osd" - - name: copy osd restart script - template: + - name: Copy osd restart script + ansible.builtin.template: src: restart_osd_daemon.sh.j2 dest: "{{ tmpdirpath.path }}/restart_osd_daemon.sh" owner: root group: root - mode: 0750 + mode: "0750" when: tmpdirpath.path is defined - - name: get pool list - command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" + - name: Get pool list + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json" register: pool_list delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" run_once: true changed_when: false check_mode: false - - name: get balancer module status - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" + - name: Get balancer module status + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json" register: balancer_status run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false check_mode: false - - name: set_fact pools_pgautoscaler_mode - set_fact: + - name: Set_fact pools_pgautoscaler_mode + ansible.builtin.set_fact: pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}" run_once: true with_items: "{{ pool_list.stdout | default('{}') | from_json }}" - - name: disable balancer - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" + - name: Disable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off" run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false when: (balancer_status.stdout | from_json)['active'] | bool - - name: 
disable pg autoscale on pools + - name: Disable pg autoscale on pools ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -83,8 +83,8 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: restart ceph osds daemon(s) - command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_osd_daemon.sh + - name: Restart ceph osds daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_osd_daemon.sh when: - hostvars[item]['handler_osd_status'] | default(False) | bool - handler_health_osd_check | bool @@ -92,13 +92,14 @@ - hostvars[item].tmpdirpath.path is defined with_items: "{{ groups[osd_group_name] | intersect(ansible_play_batch) }}" delegate_to: "{{ item }}" - run_once: True + changed_when: false + run_once: true - - name: set _osd_handler_called after restart - set_fact: - _osd_handler_called: False + - name: Set _osd_handler_called after restart + ansible.builtin.set_fact: + _osd_handler_called: false - - name: re-enable pg autoscale on pools + - name: Re-enable pg autoscale on pools ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -113,8 +114,8 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: re-enable balancer - command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" + - name: Re-enable balancer + ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on" run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" changed_when: false diff --git a/roles/ceph-handler/tasks/handler_rbd_target_api_gw.yml b/roles/ceph-handler/tasks/handler_rbd_target_api_gw.yml index 903d333f70..ff7c107ed9 100644 --- a/roles/ceph-handler/tasks/handler_rbd_target_api_gw.yml +++ b/roles/ceph-handler/tasks/handler_rbd_target_api_gw.yml @@ -1,10 +1,10 @@ --- -- name: set _rbd_target_api_handler_called before restart - set_fact: - _rbd_target_api_handler_called: True +- name: Set _rbd_target_api_handler_called before restart + ansible.builtin.set_fact: + _rbd_target_api_handler_called: true -- name: restart rbd-target-api - service: +- name: Restart rbd-target-api + ansible.builtin.service: name: rbd-target-api state: restarted when: @@ -13,18 +13,18 @@ - ceph_rbd_target_api_stat.get('stdout_lines', [])|length != 0 with_items: "{{ groups[iscsi_gw_group_name] }}" delegate_to: "{{ item }}" - run_once: True + run_once: true -- name: set _rbd_target_api_handler_called after restart - set_fact: - _rbd_target_api_handler_called: False +- name: Set _rbd_target_api_handler_called after restart + ansible.builtin.set_fact: + _rbd_target_api_handler_called: false -- name: set _rbd_target_gw_handler_called before restart - set_fact: - _rbd_target_gw_handler_called: True +- name: Set _rbd_target_gw_handler_called before restart + ansible.builtin.set_fact: + _rbd_target_gw_handler_called: true -- name: restart rbd-target-gw - service: +- name: Restart rbd-target-gw + ansible.builtin.service: name: rbd-target-gw state: restarted when: @@ -33,8 +33,8 @@ - ceph_rbd_target_gw_stat.get('stdout_lines', [])|length != 0 with_items: "{{ groups[iscsi_gw_group_name] }}" delegate_to: "{{ item }}" - run_once: True + run_once: true -- name: set _rbd_target_gw_handler_called after restart - set_fact: - 
_rbd_target_gw_handler_called: False +- name: Set _rbd_target_gw_handler_called after restart + ansible.builtin.set_fact: + _rbd_target_gw_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_rbdmirrors.yml b/roles/ceph-handler/tasks/handler_rbdmirrors.yml index f091981556..0e1c893220 100644 --- a/roles/ceph-handler/tasks/handler_rbdmirrors.yml +++ b/roles/ceph-handler/tasks/handler_rbdmirrors.yml @@ -1,27 +1,28 @@ --- -- name: set _rbdmirror_handler_called before restart - set_fact: - _rbdmirror_handler_called: True +- name: Set _rbdmirror_handler_called before restart + ansible.builtin.set_fact: + _rbdmirror_handler_called: true -- name: copy rbd mirror restart script - template: +- name: Copy rbd mirror restart script + ansible.builtin.template: src: restart_rbd_mirror_daemon.sh.j2 dest: "{{ tmpdirpath.path }}/restart_rbd_mirror_daemon.sh" owner: root group: root - mode: 0750 + mode: "0750" when: tmpdirpath.path is defined -- name: restart ceph rbd mirror daemon(s) - command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rbd_mirror_daemon.sh +- name: Restart ceph rbd mirror daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rbd_mirror_daemon.sh when: - hostvars[item]['handler_rbd_mirror_status'] | default(False) | bool - hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool - hostvars[item].tmpdirpath.path is defined with_items: "{{ groups[rbdmirror_group_name] }}" delegate_to: "{{ item }}" - run_once: True + changed_when: false + run_once: true -- name: set _rbdmirror_handler_called after restart - set_fact: - _rbdmirror_handler_called: False +- name: Set _rbdmirror_handler_called after restart + ansible.builtin.set_fact: + _rbdmirror_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_rgws.yml b/roles/ceph-handler/tasks/handler_rgws.yml index 4e11f3da32..2929c3d6be 100644 --- a/roles/ceph-handler/tasks/handler_rgws.yml +++ b/roles/ceph-handler/tasks/handler_rgws.yml @@ -1,27 +1,28 @@ --- -- name: set _rgw_handler_called before restart - set_fact: - _rgw_handler_called: True +- name: Set _rgw_handler_called before restart + ansible.builtin.set_fact: + _rgw_handler_called: true -- name: copy rgw restart script - template: +- name: Copy rgw restart script + ansible.builtin.template: src: restart_rgw_daemon.sh.j2 dest: "{{ tmpdirpath.path }}/restart_rgw_daemon.sh" owner: root group: root - mode: 0750 + mode: "0750" when: tmpdirpath.path is defined -- name: restart ceph rgw daemon(s) - command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rgw_daemon.sh +- name: Restart ceph rgw daemon(s) + ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rgw_daemon.sh when: - hostvars[item]['handler_rgw_status'] | default(False) | bool - hostvars[item]['_rgw_handler_called'] | default(False) | bool - hostvars[item].tmpdirpath.path is defined with_items: "{{ groups[rgw_group_name] }}" delegate_to: "{{ item }}" - run_once: True + changed_when: false + run_once: true -- name: set _rgw_handler_called after restart - set_fact: - _rgw_handler_called: False +- name: Set _rgw_handler_called after restart + ansible.builtin.set_fact: + _rgw_handler_called: false diff --git a/roles/ceph-handler/tasks/handler_tcmu_runner.yml b/roles/ceph-handler/tasks/handler_tcmu_runner.yml index 3dd31e0e9b..77a4b19515 100644 --- a/roles/ceph-handler/tasks/handler_tcmu_runner.yml +++ b/roles/ceph-handler/tasks/handler_tcmu_runner.yml @@ -1,10 
+1,10 @@ --- -- name: set _tcmu_runner_handler_called before restart - set_fact: - _tcmu_runner_handler_called: True +- name: Set _tcmu_runner_handler_called before restart + ansible.builtin.set_fact: + _tcmu_runner_handler_called: true -- name: restart tcmu-runner - service: +- name: Restart tcmu-runner + ansible.builtin.service: name: tcmu-runner state: restarted when: @@ -13,8 +13,8 @@ - ceph_tcmu_runner_stat.get('stdout_lines', [])|length != 0 with_items: "{{ groups[iscsi_gw_group_name] }}" delegate_to: "{{ item }}" - run_once: True + run_once: true -- name: set _tcmu_runner_handler_called after restart - set_fact: - _tcmu_runner_handler_called: False +- name: Set _tcmu_runner_handler_called after restart + ansible.builtin.set_fact: + _tcmu_runner_handler_called: false diff --git a/roles/ceph-handler/tasks/main.yml b/roles/ceph-handler/tasks/main.yml index e2ece6d141..4ea3bef6d6 100644 --- a/roles/ceph-handler/tasks/main.yml +++ b/roles/ceph-handler/tasks/main.yml @@ -1,45 +1,45 @@ --- -- name: include check_running_cluster.yml - include: check_running_cluster.yml +- name: Include check_running_cluster.yml + ansible.builtin.include_tasks: check_running_cluster.yml # We do not want to run these checks on initial deployment (`socket.rc == 0`) -- name: set_fact handler_mon_status - set_fact: +- name: Set_fact handler_mon_status + ansible.builtin.set_fact: handler_mon_status: "{{ 0 in (mon_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mon_container_stat.get('rc') == 0 and ceph_mon_container_stat.get('stdout_lines', []) | length != 0) }}" when: inventory_hostname in groups.get(mon_group_name, []) -- name: set_fact handler_osd_status - set_fact: +- name: Set_fact handler_osd_status + ansible.builtin.set_fact: handler_osd_status: "{{ 0 in (osd_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_osd_container_stat.get('rc') == 0 and ceph_osd_container_stat.get('stdout_lines', []) | length != 0) }}" when: inventory_hostname in groups.get(osd_group_name, []) -- name: set_fact handler_mds_status - set_fact: +- name: Set_fact handler_mds_status + ansible.builtin.set_fact: handler_mds_status: "{{ 0 in (mds_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mds_container_stat.get('rc') == 0 and ceph_mds_container_stat.get('stdout_lines', []) | length != 0) }}" when: inventory_hostname in groups.get(mds_group_name, []) -- name: set_fact handler_rgw_status - set_fact: +- name: Set_fact handler_rgw_status + ansible.builtin.set_fact: handler_rgw_status: "{{ 0 in (rgw_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rgw_container_stat.get('rc') == 0 and ceph_rgw_container_stat.get('stdout_lines', []) | length != 0) }}" when: inventory_hostname in groups.get(rgw_group_name, []) -- name: set_fact handler_nfs_status - set_fact: +- name: Set_fact handler_nfs_status + ansible.builtin.set_fact: handler_nfs_status: "{{ (nfs_process.get('rc') == 0) if not containerized_deployment | bool else (ceph_nfs_container_stat.get('rc') == 0 and ceph_nfs_container_stat.get('stdout_lines', []) | length != 0) }}" when: inventory_hostname in groups.get(nfs_group_name, []) -- name: set_fact handler_rbd_status - set_fact: +- name: Set_fact handler_rbd_status + ansible.builtin.set_fact: handler_rbd_mirror_status: "{{ 0 in (rbd_mirror_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else 
(ceph_rbd_mirror_container_stat.get('rc') == 0 and ceph_rbd_mirror_container_stat.get('stdout_lines', []) | length != 0) }}" when: inventory_hostname in groups.get(rbdmirror_group_name, []) -- name: set_fact handler_mgr_status - set_fact: +- name: Set_fact handler_mgr_status + ansible.builtin.set_fact: handler_mgr_status: "{{ 0 in (mgr_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mgr_container_stat.get('rc') == 0 and ceph_mgr_container_stat.get('stdout_lines', []) | length != 0) }}" when: inventory_hostname in groups.get(mgr_group_name, []) -- name: set_fact handler_crash_status - set_fact: +- name: Set_fact handler_crash_status + ansible.builtin.set_fact: handler_crash_status: "{{ crash_process.get('rc') == 0 if not containerized_deployment | bool else (ceph_crash_container_stat.get('rc') == 0 and ceph_crash_container_stat.get('stdout_lines', []) | length != 0) }}" when: - inventory_hostname in groups.get(mon_group_name, []) diff --git a/roles/ceph-infra/handlers/main.yml b/roles/ceph-infra/handlers/main.yml index d18b5d9553..a2a06d47b5 100644 --- a/roles/ceph-infra/handlers/main.yml +++ b/roles/ceph-infra/handlers/main.yml @@ -1,21 +1,21 @@ --- -- name: disable ntpd +- name: Disable ntpd failed_when: false - service: + ansible.builtin.service: name: '{{ ntp_service_name }}' state: stopped - enabled: no + enabled: false -- name: disable chronyd +- name: Disable chronyd failed_when: false - service: + ansible.builtin.service: name: '{{ chrony_daemon_name }}' - enabled: no + enabled: false state: stopped -- name: disable timesyncd +- name: Disable timesyncd failed_when: false - service: + ansible.builtin.service: name: timesyncd - enabled: no + enabled: false state: stopped diff --git a/roles/ceph-infra/meta/main.yml b/roles/ceph-infra/meta/main.yml index 8bd3009217..67bd7ce182 100644 --- a/roles/ceph-infra/meta/main.yml +++ b/roles/ceph-infra/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Guillaume Abrioux description: Handles ceph infra requirements (ntp, firewall, ...) 
license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-infra/tasks/configure_firewall.yml b/roles/ceph-infra/tasks/configure_firewall.yml index 8a0423ce20..b62bb936e7 100644 --- a/roles/ceph-infra/tasks/configure_firewall.yml +++ b/roles/ceph-infra/tasks/configure_firewall.yml @@ -1,34 +1,35 @@ --- -- name: check firewalld installation on redhat or SUSE/openSUSE - command: rpm -q firewalld # noqa [303] +- name: Check firewalld installation on redhat or SUSE/openSUSE + ansible.builtin.command: rpm -q firewalld # noqa command-instead-of-module register: firewalld_pkg_query ignore_errors: true - check_mode: no + check_mode: false changed_when: false tags: firewall -- when: (firewalld_pkg_query.get('rc', 1) == 0 - or is_atomic | bool) +- name: Configuring firewalld + when: (firewalld_pkg_query.get('rc', 1) == 0 + or is_atomic | bool) tags: firewall block: - - name: install firewalld python binding - package: + - name: Install firewalld python binding + ansible.builtin.package: name: "python{{ ansible_facts['python']['version']['major'] }}-firewall" tags: with_pkg when: not is_atomic | bool - - name: start firewalld - service: + - name: Start firewalld + ansible.builtin.service: name: firewalld state: started - enabled: yes + enabled: true register: result retries: 5 delay: 3 until: result is succeeded - - name: open ceph networks on monitor - firewalld: + - name: Open ceph networks on monitor + ansible.posix.firewalld: zone: "{{ ceph_mon_firewall_zone }}" source: "{{ item }}" permanent: true @@ -39,8 +40,8 @@ - mon_group_name is defined - mon_group_name in group_names - - name: open ceph networks on manager when collocated - firewalld: + - name: Open ceph networks on manager when collocated + ansible.posix.firewalld: zone: "{{ ceph_mgr_firewall_zone }}" source: "{{ item }}" permanent: true @@ -52,8 +53,8 @@ - mon_group_name in group_names - mgr_group_name | length == 0 - - name: open monitor and manager ports - firewalld: + - name: Open monitor and manager ports + ansible.posix.firewalld: service: "{{ item.service }}" zone: "{{ item.zone }}" permanent: true @@ -66,8 +67,8 @@ - mon_group_name is defined - mon_group_name in group_names - - name: open ceph networks on manager when dedicated - firewalld: + - name: Open ceph networks on manager when dedicated + ansible.posix.firewalld: zone: "{{ ceph_mgr_firewall_zone }}" source: "{{ item }}" permanent: true @@ -79,8 +80,8 @@ - mgr_group_name in group_names - mgr_group_name | length > 0 - - name: open manager ports - firewalld: + - name: Open manager ports + ansible.posix.firewalld: service: ceph zone: "{{ ceph_mgr_firewall_zone }}" permanent: true @@ -90,8 +91,8 @@ - mgr_group_name is defined - mgr_group_name in group_names - - name: open ceph networks on osd - firewalld: + - name: Open ceph networks on osd + ansible.posix.firewalld: zone: "{{ ceph_osd_firewall_zone }}" source: "{{ item }}" permanent: true @@ -102,8 +103,8 @@ - osd_group_name is defined - osd_group_name in group_names - - name: open osd ports - firewalld: + - name: Open osd ports + ansible.posix.firewalld: service: ceph zone: "{{ ceph_osd_firewall_zone }}" permanent: true @@ -113,8 +114,8 @@ - osd_group_name is defined - osd_group_name in group_names - - name: open ceph networks on rgw - firewalld: + - name: Open ceph networks on rgw + ansible.posix.firewalld: zone: "{{ ceph_rgw_firewall_zone }}" source: "{{ item }}" permanent: true @@ -125,8 +126,8 @@ - 
rgw_group_name is defined - rgw_group_name in group_names - - name: open rgw ports - firewalld: + - name: Open rgw ports + ansible.posix.firewalld: port: "{{ item.radosgw_frontend_port }}/tcp" zone: "{{ ceph_rgw_firewall_zone }}" permanent: true @@ -137,8 +138,8 @@ - rgw_group_name is defined - rgw_group_name in group_names - - name: open ceph networks on mds - firewalld: + - name: Open ceph networks on mds + ansible.posix.firewalld: zone: "{{ ceph_mds_firewall_zone }}" source: "{{ item }}" permanent: true @@ -149,8 +150,8 @@ - mds_group_name is defined - mds_group_name in group_names - - name: open mds ports - firewalld: + - name: Open mds ports + ansible.posix.firewalld: service: ceph zone: "{{ ceph_mds_firewall_zone }}" permanent: true @@ -161,8 +162,8 @@ - mds_group_name is defined - mds_group_name in group_names - - name: open ceph networks on nfs - firewalld: + - name: Open ceph networks on nfs + ansible.posix.firewalld: zone: "{{ ceph_nfs_firewall_zone }}" source: "{{ item }}" permanent: true @@ -173,8 +174,8 @@ - nfs_group_name is defined - nfs_group_name in group_names - - name: open nfs ports - firewalld: + - name: Open nfs ports + ansible.posix.firewalld: service: nfs zone: "{{ ceph_nfs_firewall_zone }}" permanent: true @@ -184,8 +185,8 @@ - nfs_group_name is defined - nfs_group_name in group_names - - name: open nfs ports (portmapper) - firewalld: + - name: Open nfs ports (portmapper) + ansible.posix.firewalld: port: "111/tcp" zone: "{{ ceph_nfs_firewall_zone }}" permanent: true @@ -195,8 +196,8 @@ - nfs_group_name is defined - nfs_group_name in group_names - - name: open ceph networks on rbdmirror - firewalld: + - name: Open ceph networks on rbdmirror + ansible.posix.firewalld: zone: "{{ ceph_rbdmirror_firewall_zone }}" source: "{{ item }}" permanent: true @@ -207,8 +208,8 @@ - rbdmirror_group_name is defined - rbdmirror_group_name in group_names - - name: open rbdmirror ports - firewalld: + - name: Open rbdmirror ports + ansible.posix.firewalld: service: ceph zone: "{{ ceph_rbdmirror_firewall_zone }}" permanent: true @@ -218,8 +219,8 @@ - rbdmirror_group_name is defined - rbdmirror_group_name in group_names - - name: open ceph networks on iscsi - firewalld: + - name: Open ceph networks on iscsi + ansible.posix.firewalld: zone: "{{ ceph_iscsi_firewall_zone }}" source: "{{ item }}" permanent: true @@ -230,8 +231,8 @@ - iscsi_gw_group_name is defined - iscsi_gw_group_name in group_names - - name: open iscsi target ports - firewalld: + - name: Open iscsi target ports + ansible.posix.firewalld: port: "3260/tcp" zone: "{{ ceph_iscsi_firewall_zone }}" permanent: true @@ -241,8 +242,8 @@ - iscsi_gw_group_name is defined - iscsi_gw_group_name in group_names - - name: open iscsi api ports - firewalld: + - name: Open iscsi api ports + ansible.posix.firewalld: port: "{{ api_port | default(5000) }}/tcp" zone: "{{ ceph_iscsi_firewall_zone }}" permanent: true @@ -252,8 +253,8 @@ - iscsi_gw_group_name is defined - iscsi_gw_group_name in group_names - - name: open iscsi/prometheus port - firewalld: + - name: Open iscsi/prometheus port + ansible.posix.firewalld: port: "9287/tcp" zone: "{{ ceph_iscsi_firewall_zone }}" permanent: true @@ -263,12 +264,12 @@ - iscsi_gw_group_name is defined - iscsi_gw_group_name in group_names - - name: open dashboard ports - include_tasks: dashboard_firewall.yml + - name: Open dashboard ports + ansible.builtin.include_tasks: dashboard_firewall.yml when: dashboard_enabled | bool - - name: open ceph networks on haproxy - firewalld: + - name: Open ceph networks on 
haproxy + ansible.posix.firewalld: zone: "{{ ceph_rgwloadbalancer_firewall_zone }}" source: "{{ item }}" permanent: true @@ -279,8 +280,8 @@ - rgwloadbalancer_group_name is defined - rgwloadbalancer_group_name in group_names - - name: open haproxy ports - firewalld: + - name: Open haproxy ports + ansible.posix.firewalld: port: "{{ haproxy_frontend_port | default(80) }}/tcp" zone: "{{ ceph_rgwloadbalancer_firewall_zone }}" permanent: true @@ -290,8 +291,8 @@ - rgwloadbalancer_group_name is defined - rgwloadbalancer_group_name in group_names - - name: add rich rule for keepalived vrrp - firewalld: + - name: Add rich rule for keepalived vrrp + ansible.posix.firewalld: rich_rule: 'rule protocol value="vrrp" accept' permanent: true immediate: true diff --git a/roles/ceph-infra/tasks/dashboard_firewall.yml b/roles/ceph-infra/tasks/dashboard_firewall.yml index 54a0b0c6a3..69639a7e34 100644 --- a/roles/ceph-infra/tasks/dashboard_firewall.yml +++ b/roles/ceph-infra/tasks/dashboard_firewall.yml @@ -1,60 +1,65 @@ --- -- name: open node_exporter port - firewalld: +- name: Open node_exporter port + ansible.posix.firewalld: port: "{{ node_exporter_port }}/tcp" zone: "{{ ceph_dashboard_firewall_zone }}" permanent: true immediate: true state: enabled -- block: - - name: open dashboard port - firewalld: +- name: Open dashboard port in firewalld + when: + - mgr_group_name is defined + - (groups.get(mgr_group_name,[]) | length > 0 and mgr_group_name in group_names) or + (groups.get(mgr_group_name,[]) | length == 0 and mon_group_name in group_names) + block: + - name: Open dashboard port + ansible.posix.firewalld: port: "{{ dashboard_port }}/tcp" zone: "{{ ceph_dashboard_firewall_zone }}" permanent: true immediate: true state: enabled - - name: open mgr/prometheus port - firewalld: + - name: Open mgr/prometheus port + ansible.posix.firewalld: port: "9283/tcp" zone: "{{ ceph_dashboard_firewall_zone }}" permanent: true immediate: true state: enabled - when: - - mgr_group_name is defined - - (groups.get(mgr_group_name,[]) | length > 0 and mgr_group_name in group_names) or - (groups.get(mgr_group_name,[]) | length == 0 and mon_group_name in group_names) -- block: - - name: open grafana port - firewalld: +- name: Open monitoring stack tcp ports in firewalld + when: + - monitoring_group_name is defined + - monitoring_group_name in group_names + block: + - name: Open grafana port + ansible.posix.firewalld: port: "{{ grafana_port }}/tcp" zone: "{{ ceph_dashboard_firewall_zone }}" permanent: true immediate: true state: enabled - - name: open prometheus port - firewalld: + - name: Open prometheus port + ansible.posix.firewalld: port: "{{ prometheus_port }}/tcp" zone: "{{ ceph_dashboard_firewall_zone }}" permanent: true immediate: true state: enabled - - name: open alertmanager port - firewalld: + - name: Open alertmanager port + ansible.posix.firewalld: port: "{{ alertmanager_port }}/tcp" zone: "{{ ceph_dashboard_firewall_zone }}" permanent: true immediate: true state: enabled - - name: open alertmanager cluster port - firewalld: + - name: Open alertmanager cluster port + ansible.posix.firewalld: port: "{{ alertmanager_cluster_port }}/{{ item }}" zone: "{{ ceph_dashboard_firewall_zone }}" permanent: true @@ -63,6 +68,3 @@ with_items: - "tcp" - "udp" - when: - - monitoring_group_name is defined - - monitoring_group_name in group_names diff --git a/roles/ceph-infra/tasks/main.yml b/roles/ceph-infra/tasks/main.yml index 8172dc014a..c44acd9e6f 100644 --- a/roles/ceph-infra/tasks/main.yml +++ 
b/roles/ceph-infra/tasks/main.yml @@ -1,26 +1,26 @@ --- -- name: update cache for Debian based OSs - apt: - update_cache: yes +- name: Update cache for Debian based OSs + ansible.builtin.apt: + update_cache: true when: ansible_facts['os_family'] == "Debian" register: result until: result is succeeded tags: package-install -- name: include_tasks configure_firewall.yml - include_tasks: configure_firewall.yml +- name: Include_tasks configure_firewall.yml + ansible.builtin.include_tasks: configure_firewall.yml when: - configure_firewall | bool - ansible_facts['os_family'] in ['RedHat', 'Suse'] tags: configure_firewall -- name: include_tasks setup_ntp.yml - include_tasks: setup_ntp.yml +- name: Include_tasks setup_ntp.yml + ansible.builtin.include_tasks: setup_ntp.yml when: ntp_service_enabled | bool tags: configure_ntp -- name: ensure logrotate is installed - package: +- name: Ensure logrotate is installed + ansible.builtin.package: name: logrotate state: present register: result @@ -37,8 +37,8 @@ inventory_hostname in groups.get(rbdmirror_group_name, []) or inventory_hostname in groups.get(iscsi_gw_group_name, []) -- name: add logrotate configuration - template: +- name: Add logrotate configuration + ansible.builtin.template: src: logrotate.conf.j2 dest: /etc/logrotate.d/ceph mode: "0644" @@ -52,4 +52,4 @@ inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(mgr_group_name, []) or inventory_hostname in groups.get(rbdmirror_group_name, []) or - inventory_hostname in groups.get(iscsi_gw_group_name, []) \ No newline at end of file + inventory_hostname in groups.get(iscsi_gw_group_name, []) diff --git a/roles/ceph-infra/tasks/setup_ntp.yml b/roles/ceph-infra/tasks/setup_ntp.yml index e1fed70a21..92cd79fcc4 100644 --- a/roles/ceph-infra/tasks/setup_ntp.yml +++ b/roles/ceph-infra/tasks/setup_ntp.yml @@ -1,66 +1,68 @@ --- -- name: set ntp service and chrony daemon name for Debian family - set_fact: +- name: Set ntp service and chrony daemon name for Debian family + ansible.builtin.set_fact: chrony_daemon_name: chrony ntp_service_name: ntp when: ansible_facts['os_family'] == 'Debian' -- name: set ntp service and chrony daemon name for RedHat and Suse family - set_fact: +- name: Set ntp service and chrony daemon name for RedHat and Suse family + ansible.builtin.set_fact: chrony_daemon_name: chronyd ntp_service_name: ntpd when: ansible_facts['os_family'] in ['RedHat', 'Suse'] # Installation of NTP daemons needs to be a separate task since installations # can't happen on Atomic -- name: install the ntp daemon +- name: Install the ntp daemon when: not is_atomic | bool block: - - name: install ntpd - package: + - name: Install ntpd + ansible.builtin.package: name: ntp state: present register: result until: result is succeeded when: ntp_daemon_type == "ntpd" - - name: install chrony - package: + - name: Install chrony + ansible.builtin.package: name: chrony state: present register: result until: result is succeeded when: ntp_daemon_type == "chronyd" -- name: enable the ntp daemon and disable the rest +- name: Enable the ntp daemon and disable the rest block: - - name: enable timesyncing on timesyncd - command: timedatectl set-ntp on + - name: Enable timesyncing on timesyncd + ansible.builtin.command: timedatectl set-ntp on notify: - - disable ntpd - - disable chronyd + - Disable ntpd + - Disable chronyd + changed_when: false when: ntp_daemon_type == "timesyncd" - - name: disable time sync using timesyncd if we are not using it - command: timedatectl set-ntp no + - name: 
Disable time sync using timesyncd if we are not using it + ansible.builtin.command: timedatectl set-ntp no + changed_when: false when: ntp_daemon_type != "timesyncd" - - name: enable ntpd - service: + - name: Enable ntpd + ansible.builtin.service: name: "{{ ntp_service_name }}" - enabled: yes + enabled: true state: started notify: - - disable chronyd - - disable timesyncd + - Disable chronyd + - Disable timesyncd when: ntp_daemon_type == "ntpd" - - name: enable chronyd - service: + - name: Enable chronyd + ansible.builtin.service: name: "{{ chrony_daemon_name }}" - enabled: yes + enabled: true state: started notify: - - disable ntpd - - disable timesyncd + - Disable ntpd + - Disable timesyncd when: ntp_daemon_type == "chronyd" diff --git a/roles/ceph-iscsi-gw/defaults/main.yml b/roles/ceph-iscsi-gw/defaults/main.yml index 34707c9f13..972c23a4bd 100644 --- a/roles/ceph-iscsi-gw/defaults/main.yml +++ b/roles/ceph-iscsi-gw/defaults/main.yml @@ -5,13 +5,13 @@ # GENERAL # ########### # Whether or not to generate secure certificate to iSCSI gateway nodes -generate_crt: False +generate_crt: false iscsi_conf_overrides: {} iscsi_pool_name: rbd -#iscsi_pool_size: 3 +# iscsi_pool_size: 3 -copy_admin_key: True +copy_admin_key: true ################## # RBD-TARGET-API # diff --git a/roles/ceph-iscsi-gw/meta/main.yml b/roles/ceph-iscsi-gw/meta/main.yml index a43c7e6065..b7c8f39afa 100644 --- a/roles/ceph-iscsi-gw/meta/main.yml +++ b/roles/ceph-iscsi-gw/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Paul Cuzner description: Installs Ceph iSCSI Gateways license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-iscsi-gw/tasks/common.yml b/roles/ceph-iscsi-gw/tasks/common.yml index 31a9bc9dbb..0a6a2d8d8f 100644 --- a/roles/ceph-iscsi-gw/tasks/common.yml +++ b/roles/ceph-iscsi-gw/tasks/common.yml @@ -1,5 +1,5 @@ --- -- name: get keys from monitors +- name: Get keys from monitors ceph_key: name: client.admin cluster: "{{ cluster }}" @@ -16,8 +16,8 @@ - copy_admin_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: copy ceph key(s) if needed - copy: +- name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "/etc/ceph/{{ cluster }}.client.admin.keyring" content: "{{ _admin_key.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -28,38 +28,38 @@ - copy_admin_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: add mgr ip address to trusted list with dashboard - ipv4 - set_fact: +- name: Add mgr ip address to trusted list with dashboard - ipv4 + ansible.builtin.set_fact: trusted_ip_list: '{{ trusted_ip_list | default("") }}{{ "," if trusted_ip_list is defined else "" }}{{ hostvars[item]["ansible_facts"]["all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}' with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}' when: - dashboard_enabled | bool - ip_version == 'ipv4' -- name: add mgr ip address to trusted list with dashboard - ipv6 - set_fact: +- name: Add mgr ip address to trusted list with dashboard - ipv6 + ansible.builtin.set_fact: trusted_ip_list: '{{ trusted_ip_list | default("") }}{{ "," if trusted_ip_list is defined else "" }}{{ hostvars[item]["ansible_facts"]["all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}' with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}' when: - dashboard_enabled | bool - ip_version == 'ipv6' -- 
name: deploy gateway settings, used by the ceph_iscsi_config modules +- name: Deploy gateway settings, used by the ceph_iscsi_config modules openstack.config_template.config_template: src: "{{ role_path }}/templates/iscsi-gateway.cfg.j2" dest: /etc/ceph/iscsi-gateway.cfg config_type: ini config_overrides: '{{ iscsi_conf_overrides }}' mode: "0600" - notify: restart ceph rbd-target-api-gw + notify: Restart ceph rbd-target-api-gw -- name: set_fact container_exec_cmd - set_fact: +- name: Set_fact container_exec_cmd + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}" delegate_to: "{{ groups[mon_group_name][0] }}" when: containerized_deployment | bool -- name: create iscsi pool +- name: Create iscsi pool ceph_pool: name: "{{ iscsi_pool_name }}" cluster: "{{ cluster }}" diff --git a/roles/ceph-iscsi-gw/tasks/containerized.yml b/roles/ceph-iscsi-gw/tasks/containerized.yml index c0369c04ec..c1fb992a74 100644 --- a/roles/ceph-iscsi-gw/tasks/containerized.yml +++ b/roles/ceph-iscsi-gw/tasks/containerized.yml @@ -1,31 +1,32 @@ --- -- name: create /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }} - file: +- name: Create /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }} + ansible.builtin.file: path: "/var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}" state: directory owner: "{{ ceph_uid }}" group: "{{ ceph_uid }}" mode: "{{ ceph_directories_mode }}" -- name: create rbd target log directories - file: +- name: Create rbd target log directories + ansible.builtin.file: path: '/var/log/{{ item }}' state: directory + mode: "0755" with_items: - rbd-target-api - rbd-target-gw - tcmu-runner -- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml -- name: systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers - systemd: +- name: Systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers + ansible.builtin.systemd: name: "{{ item }}" state: started - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true with_items: - tcmu-runner - rbd-target-gw diff --git a/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml b/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml index 83b03e7991..7bb04882a7 100644 --- a/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml +++ b/roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml @@ -1,88 +1,90 @@ --- -- name: create a temporary directory - tempfile: +- name: Create a temporary directory + ansible.builtin.tempfile: state: directory register: iscsi_ssl_tmp_dir delegate_to: localhost run_once: true -- name: set_fact crt_files - set_fact: +- name: Set_fact crt_files + ansible.builtin.set_fact: crt_files: - "iscsi-gateway.crt" - "iscsi-gateway.key" - "iscsi-gateway.pem" - "iscsi-gateway-pub.key" -- name: check for existing crt file(s) in monitor key/value store - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}" +- name: Check for existing crt file(s) in monitor key/value store + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}" with_items: "{{ crt_files }}" changed_when: false - failed_when: false + failed_when: crt_files_exist.rc not in [0, 22] run_once: true delegate_to: "{{ groups.get(mon_group_name)[0] }}" register: crt_files_exist -- name: set_fact crt_files_missing - set_fact: +- name: Set_fact crt_files_missing + 
ansible.builtin.set_fact: crt_files_missing: "{{ crt_files_exist.results | selectattr('rc', 'equalto', 0) | map(attribute='rc') | list | length != crt_files | length }}" -- name: generate ssl crt/key files +- name: Generate ssl crt/key files + when: crt_files_missing block: - - name: create ssl crt/key files - command: > + - name: Create ssl crt/key files + ansible.builtin.command: > openssl req -newkey rsa:2048 -nodes -keyout {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key -x509 -days 365 -out {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt -subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_facts['hostname'] }}" delegate_to: localhost - run_once: True + run_once: true + changed_when: false with_items: "{{ crt_files_exist.results }}" - - name: create pem - shell: > + - name: Create pem # noqa: no-changed-when + ansible.builtin.shell: > cat {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key > {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.pem delegate_to: localhost - run_once: True + run_once: true register: pem with_items: "{{ crt_files_exist.results }}" - - name: create public key from pem - shell: > + - name: Create public key from pem + ansible.builtin.shell: > openssl x509 -inform pem -in {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.pem -pubkey -noout > {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway-pub.key delegate_to: localhost - run_once: True + run_once: true when: pem.changed tags: skip_ansible_lint - - name: slurp ssl crt/key files - slurp: + - name: Slurp ssl crt/key files + ansible.builtin.slurp: src: "{{ iscsi_ssl_tmp_dir.path }}/{{ item }}" register: iscsi_ssl_files_content with_items: "{{ crt_files }}" run_once: true delegate_to: localhost - - name: store ssl crt/key files - command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}" + - name: Store ssl crt/key files + ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}" run_once: true delegate_to: "{{ groups.get(mon_group_name)[0] }}" with_items: "{{ iscsi_ssl_files_content.results }}" - when: crt_files_missing + changed_when: false -- name: copy crt file(s) to gateway nodes - copy: +- name: Copy crt file(s) to gateway nodes + ansible.builtin.copy: content: "{{ item.stdout | b64decode }}" dest: "/etc/ceph/{{ item.item }}" owner: root group: root - mode: 0400 + mode: "0400" changed_when: false with_items: "{{ crt_files_exist.results if not crt_files_missing else iscsi_ssl_files_content.results }}" when: not crt_files_missing -- name: clean temporary directory - file: +- name: Clean temporary directory + ansible.builtin.file: path: "{{ iscsi_ssl_tmp_dir.path }}" - state: absent \ No newline at end of file + state: absent diff --git a/roles/ceph-iscsi-gw/tasks/main.yml b/roles/ceph-iscsi-gw/tasks/main.yml index 0366ce294c..0709a95f1d 100644 --- a/roles/ceph-iscsi-gw/tasks/main.yml +++ b/roles/ceph-iscsi-gw/tasks/main.yml @@ -1,28 +1,28 @@ --- -- name: include common.yml - include_tasks: common.yml +- name: Include common.yml + ansible.builtin.include_tasks: common.yml -- name: include non-container/prerequisites.yml - include_tasks: non-container/prerequisites.yml +- name: Include non-container/prerequisites.yml + ansible.builtin.include_tasks: non-container/prerequisites.yml when: not containerized_deployment | bool # deploy_ssl_keys used the ansible controller to create self-signed crt/key/pub files # and transfers them to /etc/ceph 
directory on each controller. SSL certs are used by # the API for https support. -- name: include deploy_ssl_keys.yml - include_tasks: deploy_ssl_keys.yml +- name: Include deploy_ssl_keys.yml + ansible.builtin.include_tasks: deploy_ssl_keys.yml when: generate_crt | bool -- name: include non-container/configure_iscsi.yml - include_tasks: non-container/configure_iscsi.yml +- name: Include non-container/configure_iscsi.yml + ansible.builtin.include_tasks: non-container/configure_iscsi.yml when: - not containerized_deployment | bool - not use_new_ceph_iscsi | bool -- name: include non-container/postrequisites.yml - include_tasks: non-container/postrequisites.yml +- name: Include non-container/postrequisites.yml + ansible.builtin.include_tasks: non-container/postrequisites.yml when: not containerized_deployment | bool -- name: include containerized.yml - include_tasks: containerized.yml +- name: Include containerized.yml + ansible.builtin.include_tasks: containerized.yml when: containerized_deployment | bool diff --git a/roles/ceph-iscsi-gw/tasks/non-container/configure_iscsi.yml b/roles/ceph-iscsi-gw/tasks/non-container/configure_iscsi.yml index 4138b0c498..e4c4fa19ea 100644 --- a/roles/ceph-iscsi-gw/tasks/non-container/configure_iscsi.yml +++ b/roles/ceph-iscsi-gw/tasks/non-container/configure_iscsi.yml @@ -1,12 +1,12 @@ --- -- name: igw_gateway (tgt) | configure iscsi target (gateway) +- name: Igw_gateway (tgt) | configure iscsi target (gateway) igw_gateway: mode: "target" gateway_iqn: "{{ gateway_iqn }}" gateway_ip_list: "{{ gateway_ip_list }}" register: target -- name: igw_lun | configure luns (create/map rbds and add to lio) +- name: Igw_lun | configure luns (create/map rbds and add to lio) igw_lun: pool: "{{ item.pool }}" image: "{{ item.image }}" @@ -16,14 +16,14 @@ with_items: "{{ rbd_devices }}" register: images -- name: igw_gateway (map) | map luns to the iscsi target +- name: Igw_gateway (map) | map luns to the iscsi target igw_gateway: mode: "map" gateway_iqn: "{{ gateway_iqn }}" gateway_ip_list: "{{ gateway_ip_list }}" register: luns -- name: igw_client | configure client connectivity +- name: Igw_client | configure client connectivity igw_client: client_iqn: "{{ item.client }}" image_list: "{{ item.image_list }}" diff --git a/roles/ceph-iscsi-gw/tasks/non-container/postrequisites.yml b/roles/ceph-iscsi-gw/tasks/non-container/postrequisites.yml index 786cf8cd1c..e405165fe7 100644 --- a/roles/ceph-iscsi-gw/tasks/non-container/postrequisites.yml +++ b/roles/ceph-iscsi-gw/tasks/non-container/postrequisites.yml @@ -1,9 +1,9 @@ -- name: start rbd-target-api and rbd-target-gw - service: +- name: Start rbd-target-api and rbd-target-gw + ansible.builtin.systemd: name: "{{ item }}" state: started - enabled: yes - masked: no + enabled: true + masked: false with_items: - rbd-target-api - rbd-target-gw diff --git a/roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml b/roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml index 1a3fe404e7..fce541729a 100644 --- a/roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml +++ b/roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml @@ -1,76 +1,79 @@ --- -- name: red hat based systems tasks +- name: Red hat based systems tasks when: ansible_facts['os_family'] == 'RedHat' block: - - name: set_fact common_pkgs - set_fact: + - name: Set_fact common_pkgs + ansible.builtin.set_fact: common_pkgs: - tcmu-runner - targetcli - - name: set_fact base iscsi pkgs if new style ceph-iscsi - set_fact: + - name: Set_fact base iscsi pkgs if new style 
ceph-iscsi + ansible.builtin.set_fact: iscsi_base: - ceph-iscsi when: use_new_ceph_iscsi | bool - - name: set_fact base iscsi pkgs if using older ceph-iscsi-config - set_fact: + - name: Set_fact base iscsi pkgs if using older ceph-iscsi-config + ansible.builtin.set_fact: iscsi_base: - ceph-iscsi-cli - ceph-iscsi-config when: not use_new_ceph_iscsi | bool - - name: when ceph_iscsi_config_dev is true + - name: When ceph_iscsi_config_dev is true when: - ceph_origin == 'repository' - ceph_repository in ['dev', 'community'] - ceph_iscsi_config_dev | bool block: - - name: ceph-iscsi dependency repositories - get_url: + - name: Ceph-iscsi dependency repositories + ansible.builtin.get_url: url: "https://shaman.ceph.com/api/repos/tcmu-runner/main/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}" dest: '/etc/yum.repos.d/tcmu-runner-dev.repo' force: true + mode: "0644" register: result until: result is succeeded - - name: ceph-iscsi development repository - get_url: + - name: Ceph-iscsi development repository + ansible.builtin.get_url: url: "https://shaman.ceph.com/api/repos/{{ item }}/main/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo" dest: '/etc/yum.repos.d/{{ item }}-dev.repo' force: true + mode: "0644" register: result until: result is succeeded with_items: '{{ iscsi_base }}' when: ceph_repository == 'dev' - - name: ceph-iscsi stable repository - get_url: + - name: Ceph-iscsi stable repository + ansible.builtin.get_url: url: "https://download.ceph.com/ceph-iscsi/{{ '3' if use_new_ceph_iscsi | bool else '2' }}/rpm/el{{ ansible_facts['distribution_major_version'] }}/ceph-iscsi.repo" dest: /etc/yum.repos.d/ceph-iscsi.repo force: true + mode: "0644" register: result until: result is succeeded when: ceph_repository == 'community' - - name: install ceph iscsi package - package: + - name: Install ceph iscsi package + ansible.builtin.package: name: "{{ common_pkgs + iscsi_base }}" - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded -- name: check the status of the target.service override - stat: +- name: Check the status of the target.service override + ansible.builtin.stat: path: /etc/systemd/system/target.service register: target -- name: mask the target service - preventing manual start - systemd: +- name: Mask the target service - preventing manual start + ansible.builtin.systemd: name: target - masked: yes - enabled: no + masked: true + enabled: false when: - target.stat.exists - not target.stat.islnk @@ -79,9 +82,9 @@ # We must start rbd-target-gw/api after configure_iscsi.yml to avoid # races where they are both trying to setup the same object during # a rolling update. 
-- name: start tcmu-runner - service: +- name: Start tcmu-runner + ansible.builtin.systemd: name: tcmu-runner state: started - enabled: yes - masked: no + enabled: true + masked: false diff --git a/roles/ceph-iscsi-gw/tasks/systemd.yml b/roles/ceph-iscsi-gw/tasks/systemd.yml index 7cb0ac92aa..2fc75f6ea8 100644 --- a/roles/ceph-iscsi-gw/tasks/systemd.yml +++ b/roles/ceph-iscsi-gw/tasks/systemd.yml @@ -1,6 +1,6 @@ --- -- name: generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw - template: +- name: Generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw + ansible.builtin.template: src: "{{ role_path }}/templates/{{ item }}.service.j2" dest: /etc/systemd/system/{{ item }}.service owner: "root" @@ -11,5 +11,5 @@ - rbd-target-gw - rbd-target-api notify: - - restart ceph tcmu-runner - - restart ceph rbd-target-api-gw + - Restart ceph tcmu-runner + - Restart ceph rbd-target-api-gw diff --git a/roles/ceph-mds/defaults/main.yml b/roles/ceph-mds/defaults/main.yml index e249d02eb8..cd1342f85c 100644 --- a/roles/ceph-mds/defaults/main.yml +++ b/roles/ceph-mds/defaults/main.yml @@ -35,6 +35,6 @@ ceph_config_keys: [] # DON'T TOUCH ME # ceph_mds_systemd_overrides will override the systemd settings # for the ceph-mds services. # For example,to set "PrivateDevices=false" you can specify: -#ceph_mds_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_mds_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-mds/meta/main.yml b/roles/ceph-mds/meta/main.yml index d37bde4eb5..24fd45daf7 100644 --- a/roles/ceph-mds/meta/main.yml +++ b/roles/ceph-mds/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Installs Ceph Metadata license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-mds/tasks/common.yml b/roles/ceph-mds/tasks/common.yml index f41a5fd59e..5a5d3e4963 100644 --- a/roles/ceph-mds/tasks/common.yml +++ b/roles/ceph-mds/tasks/common.yml @@ -1,6 +1,6 @@ --- -- name: create bootstrap-mds and mds directories - file: +- name: Create bootstrap-mds and mds directories + ansible.builtin.file: path: "{{ item }}" state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -10,7 +10,7 @@ - /var/lib/ceph/bootstrap-mds/ - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }} -- name: get keys from monitors +- name: Get keys from monitors ceph_key: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -30,8 +30,8 @@ - item.copy_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: copy ceph key(s) if needed - copy: +- name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "{{ item.item.path }}" content: "{{ item.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -43,7 +43,7 @@ - item.item.copy_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: create mds keyring +- name: Create mds keyring ceph_key: name: "mds.{{ ansible_facts['hostname'] }}" cluster: "{{ cluster }}" diff --git a/roles/ceph-mds/tasks/containerized.yml b/roles/ceph-mds/tasks/containerized.yml index 49f5add724..cb494556d5 100644 --- a/roles/ceph-mds/tasks/containerized.yml +++ b/roles/ceph-mds/tasks/containerized.yml @@ -1,24 +1,24 @@ --- -- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml -- name: enable ceph-mds.target - 
service: +- name: Enable ceph-mds.target + ansible.builtin.service: name: ceph-mds.target - enabled: yes - daemon_reload: yes + enabled: true + daemon_reload: true when: containerized_deployment | bool -- name: systemd start mds container - systemd: +- name: Systemd start mds container + ansible.builtin.systemd: name: ceph-mds@{{ ansible_facts['hostname'] }} state: started - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true -- name: wait for mds socket to exist - command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'" +- name: Wait for mds socket to exist + ansible.builtin.command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'" changed_when: false register: multi_mds_socket retries: 5 diff --git a/roles/ceph-mds/tasks/create_mds_filesystems.yml b/roles/ceph-mds/tasks/create_mds_filesystems.yml index f30527de1f..5bdb80bdc4 100644 --- a/roles/ceph-mds/tasks/create_mds_filesystems.yml +++ b/roles/ceph-mds/tasks/create_mds_filesystems.yml @@ -1,9 +1,10 @@ --- -- import_role: +- name: Import ceph-facts role + ansible.builtin.import_role: name: ceph-facts tasks_from: get_def_crush_rule_name.yml -- name: create filesystem pools +- name: Create filesystem pools ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -22,7 +23,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" -- name: create ceph filesystem +- name: Create ceph filesystem ceph_fs: name: "{{ cephfs }}" cluster: "{{ cluster }}" diff --git a/roles/ceph-mds/tasks/main.yml b/roles/ceph-mds/tasks/main.yml index f6ff200f3e..38794092e0 100644 --- a/roles/ceph-mds/tasks/main.yml +++ b/roles/ceph-mds/tasks/main.yml @@ -1,17 +1,17 @@ --- -- name: include create_mds_filesystems.yml - include_tasks: create_mds_filesystems.yml +- name: Include create_mds_filesystems.yml + ansible.builtin.include_tasks: create_mds_filesystems.yml when: - inventory_hostname == groups[mds_group_name] | first - not rolling_update | bool -- name: include common.yml - include_tasks: common.yml +- name: Include common.yml + ansible.builtin.include_tasks: common.yml -- name: non_containerized.yml - include_tasks: non_containerized.yml +- name: Non_containerized.yml + ansible.builtin.include_tasks: non_containerized.yml when: not containerized_deployment | bool -- name: containerized.yml - include_tasks: containerized.yml +- name: Containerized.yml + ansible.builtin.include_tasks: containerized.yml when: containerized_deployment | bool diff --git a/roles/ceph-mds/tasks/non_containerized.yml b/roles/ceph-mds/tasks/non_containerized.yml index ab008e4a01..c9d69f95a8 100644 --- a/roles/ceph-mds/tasks/non_containerized.yml +++ b/roles/ceph-mds/tasks/non_containerized.yml @@ -1,8 +1,8 @@ --- -- name: install ceph mds for debian - apt: +- name: Install ceph mds for debian + ansible.builtin.apt: name: ceph-mds - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" default_release: "{{ ceph_stable_release_uca | 
default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}" when: - mds_group_name in group_names @@ -10,25 +10,26 @@ register: result until: result is succeeded -- name: install ceph-mds package on redhat or SUSE/openSUSE - package: +- name: Install ceph-mds package on redhat or SUSE/openSUSE + ansible.builtin.package: name: "ceph-mds" - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded when: - mds_group_name in group_names - ansible_facts['os_family'] in ['Suse', 'RedHat'] -- name: ensure systemd service override directory exists - file: +- name: Ensure systemd service override directory exists + ansible.builtin.file: state: directory path: "/etc/systemd/system/ceph-mds@.service.d/" + mode: "0755" when: - ceph_mds_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' -- name: add ceph-mds systemd service overrides +- name: Add ceph-mds systemd service overrides openstack.config_template.config_template: src: "ceph-mds.service.d-overrides.j2" dest: "/etc/systemd/system/ceph-mds@.service.d/ceph-mds-systemd-overrides.conf" @@ -38,10 +39,10 @@ - ceph_mds_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' -- name: start and add that the metadata service to the init sequence - service: +- name: Start and add that the metadata service to the init sequence + ansible.builtin.systemd: name: ceph-mds@{{ ansible_facts['hostname'] }} state: started - enabled: yes - masked: no + enabled: true + masked: false changed_when: false diff --git a/roles/ceph-mds/tasks/systemd.yml b/roles/ceph-mds/tasks/systemd.yml index f6970828e5..4c6296b742 100644 --- a/roles/ceph-mds/tasks/systemd.yml +++ b/roles/ceph-mds/tasks/systemd.yml @@ -1,15 +1,16 @@ --- -- name: generate systemd unit file - template: +- name: Generate systemd unit file + ansible.builtin.template: src: "{{ role_path }}/templates/ceph-mds.service.j2" dest: /etc/systemd/system/ceph-mds@.service owner: "root" group: "root" mode: "0644" - notify: restart ceph mdss + notify: Restart ceph mdss -- name: generate systemd ceph-mds target file - copy: +- name: Generate systemd ceph-mds target file + ansible.builtin.copy: src: ceph-mds.target dest: /etc/systemd/system/ceph-mds.target - when: containerized_deployment | bool \ No newline at end of file + mode: "0644" + when: containerized_deployment | bool diff --git a/roles/ceph-mgr/defaults/main.yml b/roles/ceph-mgr/defaults/main.yml index a4dad30d73..cd58ef816d 100644 --- a/roles/ceph-mgr/defaults/main.yml +++ b/roles/ceph-mgr/defaults/main.yml @@ -46,6 +46,6 @@ ceph_config_keys: [] # DON'T TOUCH ME # ceph_mgr_systemd_overrides will override the systemd settings # for the ceph-mgr services. 
# For example,to set "PrivateDevices=false" you can specify: -#ceph_mgr_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_mgr_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-mgr/meta/main.yml b/roles/ceph-mgr/meta/main.yml index c6407cca16..95b8f79e40 100644 --- a/roles/ceph-mgr/meta/main.yml +++ b/roles/ceph-mgr/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Installs Ceph Manager license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-mgr/tasks/common.yml b/roles/ceph-mgr/tasks/common.yml index f9cc63f6e3..2ed4c05bc5 100644 --- a/roles/ceph-mgr/tasks/common.yml +++ b/roles/ceph-mgr/tasks/common.yml @@ -1,13 +1,13 @@ --- -- name: create mgr directory - file: +- name: Create mgr directory + ansible.builtin.file: path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }} state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" mode: "{{ ceph_directories_mode }}" -- name: fetch ceph mgr keyring +- name: Fetch ceph mgr keyring ceph_key: name: "mgr.{{ ansible_facts['hostname'] }}" caps: @@ -26,10 +26,10 @@ when: groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)" no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: create and copy keyrings +- name: Create and copy keyrings when: groups.get(mgr_group_name, []) | length > 0 block: - - name: create ceph mgr keyring(s) on a mon node + - name: Create ceph mgr keyring(s) on a mon node ceph_key: name: "mgr.{{ hostvars[item]['ansible_facts']['hostname'] }}" caps: @@ -45,17 +45,17 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" with_items: "{{ groups.get(mgr_group_name, []) }}" - run_once: True + run_once: true delegate_to: "{{ groups[mon_group_name][0] }}" no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: set_fact _mgr_keys - set_fact: + - name: Set_fact _mgr_keys + ansible.builtin.set_fact: _mgr_keys: - { 'name': 'client.admin', 'path': "/etc/ceph/{{ cluster }}.client.admin.keyring", 'copy_key': "{{ copy_admin_key }}" } - { 'name': "mgr.{{ ansible_facts['hostname'] }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring", 'copy_key': true } - - name: get keys from monitors + - name: Get keys from monitors ceph_key: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -72,8 +72,8 @@ - item.copy_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: copy ceph key(s) if needed - copy: + - name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "{{ item.item.path }}" content: "{{ item.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -86,15 +86,15 @@ - item.item.copy_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: set mgr key permissions - file: +- name: Set mgr key permissions + ansible.builtin.file: path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" mode: "{{ ceph_keyring_permissions }}" when: cephx | bool -- name: append dashboard modules to 
ceph_mgr_modules - set_fact: +- name: Append dashboard modules to ceph_mgr_modules + ansible.builtin.set_fact: ceph_mgr_modules: "{{ ceph_mgr_modules | union(['dashboard', 'prometheus']) }}" when: dashboard_enabled | bool diff --git a/roles/ceph-mgr/tasks/main.yml b/roles/ceph-mgr/tasks/main.yml index 0395a9cd29..a4caaa3f5d 100644 --- a/roles/ceph-mgr/tasks/main.yml +++ b/roles/ceph-mgr/tasks/main.yml @@ -1,6 +1,6 @@ --- -- name: set_fact container_exec_cmd - set_fact: +- name: Set_fact container_exec_cmd + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" with_items: "{{ groups.get(mon_group_name, []) }}" delegate_to: "{{ item }}" @@ -8,18 +8,18 @@ run_once: true when: containerized_deployment | bool -- name: include common.yml - include_tasks: common.yml +- name: Include common.yml + ansible.builtin.include_tasks: common.yml -- name: include pre_requisite.yml - include_tasks: pre_requisite.yml +- name: Include pre_requisite.yml + ansible.builtin.include_tasks: pre_requisite.yml when: not containerized_deployment | bool -- name: include start_mgr.yml - include_tasks: start_mgr.yml +- name: Include start_mgr.yml + ansible.builtin.include_tasks: start_mgr.yml -- name: include mgr_modules.yml - include_tasks: mgr_modules.yml +- name: Include mgr_modules.yml + ansible.builtin.include_tasks: mgr_modules.yml when: - ceph_mgr_modules | length > 0 - ((groups[mgr_group_name] | default([]) | length == 0 and inventory_hostname == groups[mon_group_name] | last) or diff --git a/roles/ceph-mgr/tasks/mgr_modules.yml b/roles/ceph-mgr/tasks/mgr_modules.yml index 0f1c542830..6e26d27519 100644 --- a/roles/ceph-mgr/tasks/mgr_modules.yml +++ b/roles/ceph-mgr/tasks/mgr_modules.yml @@ -1,6 +1,6 @@ --- -- name: wait for all mgr to be up - command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" +- name: Wait for all mgr to be up + ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} mgr dump -f json" register: mgr_dump retries: 30 delay: 5 @@ -11,22 +11,22 @@ - (mgr_dump.stdout | from_json).available | bool when: not ansible_check_mode -- name: get enabled modules from ceph-mgr - command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls" - check_mode: no +- name: Get enabled modules from ceph-mgr + ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls" + check_mode: false changed_when: false register: _ceph_mgr_modules delegate_to: "{{ groups[mon_group_name][0] }}" -- name: set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict) - set_fact: +- name: Set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict) + ansible.builtin.set_fact: _ceph_mgr_modules: "{{ _ceph_mgr_modules.get('stdout', '{}') | from_json }}" -- name: set _disabled_ceph_mgr_modules fact - set_fact: +- name: Set _disabled_ceph_mgr_modules fact + ansible.builtin.set_fact: _disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}" -- name: disable ceph 
mgr enabled modules +- name: Disable ceph mgr enabled modules ceph_mgr_module: name: "{{ item }}" cluster: "{{ cluster }}" @@ -38,7 +38,7 @@ delegate_to: "{{ groups[mon_group_name][0] }}" when: item not in ceph_mgr_modules -- name: add modules to ceph-mgr +- name: Add modules to ceph-mgr ceph_mgr_module: name: "{{ item }}" cluster: "{{ cluster }}" diff --git a/roles/ceph-mgr/tasks/pre_requisite.yml b/roles/ceph-mgr/tasks/pre_requisite.yml index 20f41c479b..0f483947b1 100644 --- a/roles/ceph-mgr/tasks/pre_requisite.yml +++ b/roles/ceph-mgr/tasks/pre_requisite.yml @@ -1,24 +1,24 @@ --- -- name: set_fact ceph_mgr_packages for sso - set_fact: +- name: Set_fact ceph_mgr_packages for sso + ansible.builtin.set_fact: ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_facts['distribution_major_version'] | int == 8 else 'python-saml']) }}" when: - dashboard_enabled | bool - ansible_facts['distribution'] == 'RedHat' -- name: set_fact ceph_mgr_packages for dashboard - set_fact: +- name: Set_fact ceph_mgr_packages for dashboard + ansible.builtin.set_fact: ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-dashboard']) }}" when: dashboard_enabled | bool -- name: set_fact ceph_mgr_packages for non el7 distribution - set_fact: +- name: Set_fact ceph_mgr_packages for non el7 distribution + ansible.builtin.set_fact: ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-diskprediction-local']) }}" when: - ansible_facts['os_family'] != 'RedHat' - ansible_facts['distribution_major_version'] | int != 7 -- name: enable crb repository +- name: Enable crb repository community.general.dnf_config_manager: name: crb state: enabled @@ -26,18 +26,18 @@ - ansible_facts['os_family'] == 'RedHat' - ansible_facts['distribution_major_version'] | int == 9 -- name: install ceph-mgr packages on RedHat or SUSE - package: +- name: Install ceph-mgr packages on RedHat or SUSE + ansible.builtin.package: name: '{{ ceph_mgr_packages }}' - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded when: ansible_facts['os_family'] in ['RedHat', 'Suse'] -- name: install ceph-mgr packages for debian - apt: +- name: Install ceph-mgr packages for debian + ansible.builtin.apt: name: '{{ ceph_mgr_packages }}' - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}" register: result until: result is succeeded diff --git a/roles/ceph-mgr/tasks/start_mgr.yml b/roles/ceph-mgr/tasks/start_mgr.yml index 7b8eaa3143..ffb1bfde92 100644 --- a/roles/ceph-mgr/tasks/start_mgr.yml +++ b/roles/ceph-mgr/tasks/start_mgr.yml @@ -1,13 +1,14 @@ --- -- name: ensure systemd service override directory exists - file: +- name: Ensure systemd service override directory exists + ansible.builtin.file: state: directory path: "/etc/systemd/system/ceph-mgr@.service.d/" + mode: "0755" when: - ceph_mgr_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' -- name: add ceph-mgr systemd service overrides +- name: Add ceph-mgr systemd service overrides openstack.config_template.config_template: src: "ceph-mgr.service.d-overrides.j2" 
dest: "/etc/systemd/system/ceph-mgr@.service.d/ceph-mgr-systemd-overrides.conf" @@ -17,21 +18,21 @@ - ceph_mgr_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' -- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml when: containerized_deployment | bool -- name: enable ceph-mgr.target - service: +- name: Enable ceph-mgr.target + ansible.builtin.service: name: ceph-mgr.target - enabled: yes - daemon_reload: yes + enabled: true + daemon_reload: true when: containerized_deployment | bool -- name: systemd start mgr - systemd: +- name: Systemd start mgr + ansible.builtin.systemd: name: ceph-mgr@{{ ansible_facts['hostname'] }} state: started - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true diff --git a/roles/ceph-mgr/tasks/systemd.yml b/roles/ceph-mgr/tasks/systemd.yml index 2cfadc8ca5..7ee9f5a34c 100644 --- a/roles/ceph-mgr/tasks/systemd.yml +++ b/roles/ceph-mgr/tasks/systemd.yml @@ -1,15 +1,16 @@ --- -- name: generate systemd unit file - template: +- name: Generate systemd unit file + ansible.builtin.template: src: "{{ role_path }}/templates/ceph-mgr.service.j2" dest: /etc/systemd/system/ceph-mgr@.service owner: "root" group: "root" mode: "0644" - notify: restart ceph mgrs + notify: Restart ceph mgrs -- name: generate systemd ceph-mgr target file - copy: +- name: Generate systemd ceph-mgr target file + ansible.builtin.copy: src: ceph-mgr.target dest: /etc/systemd/system/ceph-mgr.target - when: containerized_deployment | bool \ No newline at end of file + mode: "0644" + when: containerized_deployment | bool diff --git a/roles/ceph-mon/defaults/main.yml b/roles/ceph-mon/defaults/main.yml index 636bc4506c..66638bda0c 100644 --- a/roles/ceph-mon/defaults/main.yml +++ b/roles/ceph-mon/defaults/main.yml @@ -56,6 +56,6 @@ ceph_config_keys: [] # DON'T TOUCH ME # ceph_mon_systemd_overrides will override the systemd settings # for the ceph-mon services. # For example,to set "PrivateDevices=false" you can specify: -#ceph_mon_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_mon_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-mon/meta/main.yml b/roles/ceph-mon/meta/main.yml index 7f2233a07e..6ade76f5e5 100644 --- a/roles/ceph-mon/meta/main.yml +++ b/roles/ceph-mon/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Installs Ceph Monitor license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-mon/tasks/ceph_keys.yml b/roles/ceph-mon/tasks/ceph_keys.yml index 7ec8c7d788..02d002bcaf 100644 --- a/roles/ceph-mon/tasks/ceph_keys.yml +++ b/roles/ceph-mon/tasks/ceph_keys.yml @@ -1,6 +1,6 @@ --- -- name: waiting for the monitor(s) to form the quorum... - command: > +- name: Waiting for the monitor(s) to form the quorum... 
+ ansible.builtin.command: > {{ container_exec_cmd }} ceph --cluster {{ cluster }} @@ -16,7 +16,7 @@ changed_when: false when: not ansible_check_mode -- name: fetch ceph initial keys +- name: Fetch ceph initial keys ceph_key: state: fetch_initial_keys cluster: "{{ cluster }}" diff --git a/roles/ceph-mon/tasks/deploy_monitors.yml b/roles/ceph-mon/tasks/deploy_monitors.yml index 78575759fd..1535418070 100644 --- a/roles/ceph-mon/tasks/deploy_monitors.yml +++ b/roles/ceph-mon/tasks/deploy_monitors.yml @@ -1,8 +1,8 @@ --- -- name: cephx related tasks +- name: Cephx related tasks when: cephx | bool block: - - name: check if monitor initial keyring already exists + - name: Check if monitor initial keyring already exists ceph_key: name: mon. cluster: "{{ cluster }}" @@ -14,13 +14,13 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" register: initial_mon_key - run_once: True + run_once: true delegate_to: "{{ running_mon }}" failed_when: initial_mon_key.rc not in [0, 2] no_log: "{{ no_log_on_ceph_key_tasks }}" when: running_mon is defined - - name: generate monitor initial keyring + - name: Generate monitor initial keyring ceph_key: state: generate_secret register: monitor_keyring @@ -33,17 +33,17 @@ or initial_mon_key is not succeeded - - name: set_fact _initial_mon_key_success - set_fact: # when initial_mon_key is registered above, `rc: 2` is considered success. + - name: Set_fact _initial_mon_key_success + ansible.builtin.set_fact: # when initial_mon_key is registered above, `rc: 2` is considered success. _initial_mon_key_success: "{{ initial_mon_key is not skipped and initial_mon_key.rc == 0 }}" - - name: get initial keyring when it already exists - set_fact: + - name: Get initial keyring when it already exists + ansible.builtin.set_fact: monitor_keyring: "{{ (initial_mon_key.stdout | from_json)[0]['key'] if _initial_mon_key_success | bool else monitor_keyring.stdout }}" when: initial_mon_key.stdout|default('')|length > 0 or monitor_keyring is not skipped no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: create monitor initial keyring + - name: Create monitor initial keyring ceph_key: name: mon. dest: "/var/lib/ceph/tmp/" @@ -51,7 +51,7 @@ cluster: "{{ cluster }}" caps: mon: allow * - import_key: False + import_key: false owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" mode: "0400" @@ -60,15 +60,16 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - - name: copy the initial key in /etc/ceph (for containers) - copy: + - name: Copy the initial key in /etc/ceph (for containers) + ansible.builtin.copy: src: /var/lib/ceph/tmp/{{ cluster }}.mon..keyring dest: /etc/ceph/{{ cluster }}.mon.keyring remote_src: true + mode: "0640" when: containerized_deployment | bool -- name: create monitor directory - file: +- name: Create monitor directory + ansible.builtin.file: path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }} state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -80,15 +81,15 @@ # # This is only needed when upgrading from older versions of Ceph that used to # run as `root` (https://github.com/ceph/ceph-ansible/issues/1635). 
-- name: recursively fix ownership of monitor directory - file: +- name: Recursively fix ownership of monitor directory + ansible.builtin.file: path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }} state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" recurse: true -- name: create admin keyring +- name: Create admin keyring ceph_authtool: name: client.admin path: /etc/ceph/ceph.client.admin.keyring @@ -100,7 +101,7 @@ mgr: allow * osd: allow * mds: allow * - create_keyring: True + create_keyring: true gen_key: "{{ True if admin_secret == 'admin_secret' else omit }}" add_key: "{{ admin_secret if admin_secret != 'admin_secret' else omit }}" delegate_to: "{{ groups[mon_group_name][0] }}" @@ -113,15 +114,15 @@ - cephx | bool -- name: slurp admin keyring - slurp: +- name: Slurp admin keyring + ansible.builtin.slurp: src: "/etc/ceph/{{ cluster }}.client.admin.keyring" delegate_to: "{{ groups[mon_group_name][0] }}" - run_once: True + run_once: true register: admin_keyring -- name: copy admin keyring over to mons - copy: +- name: Copy admin keyring over to mons + ansible.builtin.copy: dest: "{{ admin_keyring.source }}" content: "{{ admin_keyring.content | b64decode }}" owner: "{{ ceph_uid }}" @@ -130,7 +131,7 @@ delegate_to: "{{ item }}" loop: "{{ groups[mon_group_name] }}" -- name: import admin keyring into mon keyring +- name: Import admin keyring into mon keyring ceph_authtool: path: "/var/lib/ceph/tmp/{{ cluster }}.mon..keyring" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -140,21 +141,21 @@ environment: CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" - no_log: False + no_log: false # no_log: "{{ no_log_on_ceph_key_tasks }}" when: - cephx | bool -- name: set_fact ceph-mon container command - set_fact: - ceph_mon_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=ceph-mon ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' +ceph_client_docker_image_tag if containerized_deployment | bool else 'ceph-mon' }}" +- name: Set_fact ceph-mon container command + ansible.builtin.set_fact: + ceph_mon_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=ceph-mon ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' + ceph_client_docker_image_tag if containerized_deployment | bool else 'ceph-mon' }}" -- name: set_fact monmaptool container command - set_fact: - ceph_monmaptool_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=monmaptool ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' +ceph_client_docker_image_tag if containerized_deployment | bool else 'monmaptool' }}" +- name: Set_fact monmaptool container command + ansible.builtin.set_fact: + ceph_monmaptool_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=monmaptool ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' + ceph_client_docker_image_tag if containerized_deployment | bool else 'monmaptool' }}" -- name: generate initial monmap - command: > +- name: Generate initial monmap + ansible.builtin.command: > 
{{ ceph_monmaptool_cmd }} --create {% for host in _monitor_addresses -%} @@ -171,10 +172,8 @@ args: creates: /etc/ceph/monmap -#[v2:192.168.17.10:3300,v1:192.168.17.10:6789] - -- name: ceph monitor mkfs with keyring - command: > +- name: Ceph monitor mkfs with keyring + ansible.builtin.command: > {{ ceph_mon_cmd }} --cluster {{ cluster }} --setuser "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -188,8 +187,8 @@ creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring when: cephx | bool -- name: ceph monitor mkfs without keyring - command: > +- name: Ceph monitor mkfs without keyring + ansible.builtin.command: > {{ ceph_mon_cmd }} --cluster {{ cluster }} --setuser "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" diff --git a/roles/ceph-mon/tasks/main.yml b/roles/ceph-mon/tasks/main.yml index 848b744f27..7afebcee04 100644 --- a/roles/ceph-mon/tasks/main.yml +++ b/roles/ceph-mon/tasks/main.yml @@ -1,25 +1,25 @@ --- -- name: set_fact container_exec_cmd - set_fact: +- name: Set_fact container_exec_cmd + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}" when: containerized_deployment | bool -- name: include deploy_monitors.yml - include_tasks: deploy_monitors.yml +- name: Include deploy_monitors.yml + ansible.builtin.include_tasks: deploy_monitors.yml when: # we test for both container and non-container - (mon_socket is defined and mon_socket.get('rc') != 0) or (ceph_mon_container_stat is defined and ceph_mon_container_stat.get('stdout_lines', [])|length == 0) - not switch_to_containers | default(False) | bool -- name: include start_monitor.yml - include_tasks: start_monitor.yml +- name: Include start_monitor.yml + ansible.builtin.include_tasks: start_monitor.yml -- name: include_tasks ceph_keys.yml - include_tasks: ceph_keys.yml +- name: Include_tasks ceph_keys.yml + ansible.builtin.include_tasks: ceph_keys.yml when: not switch_to_containers | default(False) | bool -- name: include secure_cluster.yml - include_tasks: secure_cluster.yml +- name: Include secure_cluster.yml + ansible.builtin.include_tasks: secure_cluster.yml when: - secure_cluster | bool - inventory_hostname == groups[mon_group_name] | first diff --git a/roles/ceph-mon/tasks/secure_cluster.yml b/roles/ceph-mon/tasks/secure_cluster.yml index 63d3cca430..19c3621525 100644 --- a/roles/ceph-mon/tasks/secure_cluster.yml +++ b/roles/ceph-mon/tasks/secure_cluster.yml @@ -1,15 +1,15 @@ --- -- name: collect all the pools - command: > +- name: Collect all the pools + ansible.builtin.command: > {{ container_exec_cmd }} rados --cluster {{ cluster }} lspools changed_when: false register: ceph_pools - check_mode: no + check_mode: false -- name: secure the cluster - command: > +- name: Secure the cluster + ansible.builtin.command: > {{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true changed_when: false with_nested: - - "{{ ceph_pools.stdout_lines|default([]) }}" + - "{{ ceph_pools.stdout_lines | default([]) }}" - "{{ secure_cluster_flags }}" diff --git a/roles/ceph-mon/tasks/start_monitor.yml b/roles/ceph-mon/tasks/start_monitor.yml index 7ecb3600b4..98b6061f76 100644 --- a/roles/ceph-mon/tasks/start_monitor.yml +++ b/roles/ceph-mon/tasks/start_monitor.yml @@ -1,14 +1,15 @@ --- -- name: ensure systemd service override directory exists - file: +- name: Ensure systemd service override directory exists + ansible.builtin.file: state: directory path: 
"/etc/systemd/system/ceph-mon@.service.d/" + mode: "0755" when: - not containerized_deployment | bool - ceph_mon_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' -- name: add ceph-mon systemd service overrides +- name: Add ceph-mon systemd service overrides openstack.config_template.config_template: src: "ceph-mon.service.d-overrides.j2" dest: "/etc/systemd/system/ceph-mon@.service.d/ceph-mon-systemd-overrides.conf" @@ -19,14 +20,14 @@ - ceph_mon_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' -- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml when: containerized_deployment | bool -- name: start the monitor service - systemd: +- name: Start the monitor service + ansible.builtin.systemd: name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_facts['hostname'] }} state: started - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true diff --git a/roles/ceph-mon/tasks/systemd.yml b/roles/ceph-mon/tasks/systemd.yml index ba90b74480..8ae4ccf602 100644 --- a/roles/ceph-mon/tasks/systemd.yml +++ b/roles/ceph-mon/tasks/systemd.yml @@ -1,22 +1,23 @@ --- -- name: generate systemd unit file for mon container - template: +- name: Generate systemd unit file for mon container + ansible.builtin.template: src: "{{ role_path }}/templates/ceph-mon.service.j2" dest: /etc/systemd/system/ceph-mon@.service owner: "root" group: "root" mode: "0644" - notify: restart ceph mons + notify: Restart ceph mons -- name: generate systemd ceph-mon target file - copy: +- name: Generate systemd ceph-mon target file + ansible.builtin.copy: src: ceph-mon.target dest: /etc/systemd/system/ceph-mon.target + mode: "0644" when: containerized_deployment | bool -- name: enable ceph-mon.target - service: +- name: Enable ceph-mon.target + ansible.builtin.service: name: ceph-mon.target - enabled: yes - daemon_reload: yes - when: containerized_deployment | bool \ No newline at end of file + enabled: true + daemon_reload: true + when: containerized_deployment | bool diff --git a/roles/ceph-nfs/defaults/main.yml b/roles/ceph-nfs/defaults/main.yml index 38248fbee2..5cfbe22a16 100644 --- a/roles/ceph-nfs/defaults/main.yml +++ b/roles/ceph-nfs/defaults/main.yml @@ -84,8 +84,8 @@ ceph_nfs_rgw_squash: "Root_Squash" ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p" # Note: keys are optional and can be generated, but not on containerized, where # they must be configered. 
-#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY" -#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C" +# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY" +# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C" rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }} ################### @@ -98,19 +98,19 @@ rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }} # https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example # # Example: -#CACHEINODE { - #Entries_HWMark = 100000; -#} +# CACHEINODE { + # Entries_HWMark = 100000; +# } # -#ganesha_core_param_overrides: -#ganesha_ceph_export_overrides: -#ganesha_rgw_export_overrides: -#ganesha_rgw_section_overrides: -#ganesha_log_overrides: -#ganesha_conf_overrides: | -# CACHEINODE { - #Entries_HWMark = 100000; -# } +# ganesha_core_param_overrides: +# ganesha_ceph_export_overrides: +# ganesha_rgw_export_overrides: +# ganesha_rgw_section_overrides: +# ganesha_log_overrides: +# ganesha_conf_overrides: | +# CACHEINODE { + # Entries_HWMark = 100000; +# } ########## # DOCKER # diff --git a/roles/ceph-nfs/meta/main.yml b/roles/ceph-nfs/meta/main.yml index 965c9d164f..53a6746337 100644 --- a/roles/ceph-nfs/meta/main.yml +++ b/roles/ceph-nfs/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Daniel Gryniewicz description: Installs Ceph NFS Gateway license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml index 5e2f0c85e0..587e3b299e 100644 --- a/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml +++ b/roles/ceph-nfs/tasks/create_rgw_nfs_user.yml @@ -1,5 +1,5 @@ --- -- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}" +- name: Create rgw nfs user "{{ ceph_nfs_rgw_user }}" radosgw_user: name: "{{ ceph_nfs_rgw_user }}" cluster: "{{ cluster }}" @@ -15,8 +15,8 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" -- name: set_fact ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key - set_fact: +- name: Set_fact ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key + ansible.builtin.set_fact: ceph_nfs_rgw_access_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['access_key'] }}" ceph_nfs_rgw_secret_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['secret_key'] }}" delegate_to: "{{ groups[mon_group_name][0] }}" diff --git a/roles/ceph-nfs/tasks/main.yml b/roles/ceph-nfs/tasks/main.yml index f742e486e8..acec885619 100644 --- a/roles/ceph-nfs/tasks/main.yml +++ b/roles/ceph-nfs/tasks/main.yml @@ -1,25 +1,25 @@ --- # global/common requirement -- name: stop nfs server service - systemd: +- name: Stop nfs server service + ansible.builtin.systemd: name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}" state: stopped - enabled: no + enabled: false failed_when: false -- name: include pre_requisite_non_container.yml - include_tasks: pre_requisite_non_container.yml +- name: Include pre_requisite_non_container.yml + ansible.builtin.include_tasks: pre_requisite_non_container.yml when: not containerized_deployment | bool -- name: include pre_requisite_container.yml - include_tasks: 
pre_requisite_container.yml +- name: Include pre_requisite_container.yml + ansible.builtin.include_tasks: pre_requisite_container.yml when: containerized_deployment | bool -- name: set_fact _rgw_hostname - set_fact: +- name: Set_fact _rgw_hostname + ansible.builtin.set_fact: _rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}" -- name: set rgw parameter (log file) +- name: Set rgw parameter (log file) ceph_config: action: set who: "client.rgw.{{ _rgw_hostname }}" @@ -30,12 +30,12 @@ CEPH_CONTAINER_BINARY: "{{ container_binary }}" loop: "{{ groups.get('nfss', []) }}" -- name: include create_rgw_nfs_user.yml - import_tasks: create_rgw_nfs_user.yml +- name: Include create_rgw_nfs_user.yml + ansible.builtin.import_tasks: create_rgw_nfs_user.yml when: groups.get(mon_group_name, []) | length > 0 -- name: install nfs-ganesha-selinux on RHEL 8 - package: +- name: Install nfs-ganesha-selinux on RHEL 8 + ansible.builtin.package: name: nfs-ganesha-selinux state: present register: result @@ -47,8 +47,8 @@ - ansible_facts['distribution_major_version'] == '8' # NOTE (leseb): workaround for issues with ganesha and librgw -- name: add ganesha_t to permissive domain - selinux_permissive: +- name: Add ganesha_t to permissive domain + community.general.selinux_permissive: name: ganesha_t permissive: true failed_when: false @@ -57,13 +57,13 @@ - ansible_facts['os_family'] == 'RedHat' - ansible_facts['selinux']['status'] == 'enabled' -- name: nfs with external ceph cluster task related +- name: Nfs with external ceph cluster task related when: - groups.get(mon_group_name, []) | length == 0 - ceph_nfs_ceph_user is defined block: - - name: create keyring directory - file: + - name: Create keyring directory + ansible.builtin.file: path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ item }}" state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -73,12 +73,12 @@ - "{{ ceph_nfs_ceph_user }}" - "{{ ansible_facts['hostname'] }}" - - name: set_fact rgw_client_name - set_fact: + - name: Set_fact rgw_client_name + ansible.builtin.set_fact: rgw_client_name: "client.rgw.{{ ceph_nfs_ceph_user }}" - - name: get client cephx keys - copy: + - name: Get client cephx keys + ansible.builtin.copy: dest: "{{ item.1 }}" content: "{{ item.0.content | b64decode }}" mode: "{{ item.0.item.get('mode', '0600') }}" @@ -92,5 +92,5 @@ - item.0.item.name == 'client.' 
+ ceph_nfs_ceph_user or item.0.item.name == rgw_client_name no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: include start_nfs.yml - import_tasks: start_nfs.yml +- name: Include start_nfs.yml + ansible.builtin.import_tasks: start_nfs.yml diff --git a/roles/ceph-nfs/tasks/pre_requisite_container.yml b/roles/ceph-nfs/tasks/pre_requisite_container.yml index 563e35c6d8..023c8d0794 100644 --- a/roles/ceph-nfs/tasks/pre_requisite_container.yml +++ b/roles/ceph-nfs/tasks/pre_requisite_container.yml @@ -1,17 +1,17 @@ --- -- name: keyring related tasks +- name: Keyring related tasks when: groups.get(mon_group_name, []) | length > 0 block: - - name: set_fact container_exec_cmd - set_fact: + - name: Set_fact container_exec_cmd + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" with_items: "{{ groups.get(mon_group_name, []) }}" delegate_to: "{{ item }}" delegate_facts: true run_once: true - - name: "/var/lib/ceph/radosgw/{{ cluster }}-{{ ansible_facts['hostname'] }}" - file: + - name: Create directories + ansible.builtin.file: path: "{{ item.0 }}" state: "directory" owner: "{{ ceph_uid }}" @@ -20,24 +20,24 @@ delegate_to: "{{ item.1 }}" with_nested: - ["/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", - "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}" ] - - [ "{{ groups.get(mon_group_name)[0] }}", "{{ inventory_hostname }}" ] + "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}"] + - ["{{ groups.get(mon_group_name)[0] }}", "{{ inventory_hostname }}"] - - name: set_fact keyrings_list - set_fact: + - name: Set_fact keyrings_list + ansible.builtin.set_fact: keyrings_list: - { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" } - { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" } - - { name: "client.rgw.{{ ansible_facts['hostname'] }}", create: True, path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "allow r", "osd": "allow rwx tag rgw *=*"} } - - { name: "client.nfs.{{ ansible_facts['hostname'] }}", create: True, path: "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "r", "osd": "allow rw pool=.nfs"} } + - { name: "client.rgw.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "allow r", "osd": "allow rwx tag rgw *=*"} } + - { name: "client.nfs.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "r", "osd": "allow rw pool=.nfs"} } - - name: create keyrings from a monitor + - name: Create keyrings from a monitor ceph_key: name: "{{ item.name }}" cluster: "{{ cluster }}" dest: "{{ item.path }}" caps: "{{ item.caps }}" - import_key: True + import_key: true owner: "{{ ceph_uid }}" group: "{{ ceph_uid }}" mode: "0600" @@ -51,7 +51,7 @@ - cephx | bool - item.create | default(False) | bool - - name: get keys from monitors + - name: Get keys from monitors ceph_key: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -69,12 +69,12 @@ - item.copy_key | default(True) | bool no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: debug - debug: + - name: Debug + ansible.builtin.debug: msg: "{{ _rgw_keys }}" - - name: copy ceph key(s) if 
needed - copy: + - name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "{{ item.item.path }}" content: "{{ item.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -86,22 +86,23 @@ - item.item.copy_key | default(True) | bool no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: dbus related tasks + - name: Dbus related tasks + when: ceph_nfs_dynamic_exports | bool block: - - name: get file - command: "{{ container_binary }} run --rm --entrypoint=cat {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} /etc/dbus-1/system.d/org.ganesha.nfsd.conf" + - name: Get file + ansible.builtin.command: "{{ container_binary }} run --rm --entrypoint=cat {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} /etc/dbus-1/system.d/org.ganesha.nfsd.conf" register: dbus_ganesha_file run_once: true changed_when: false - - name: create dbus service file - copy: + - name: Create dbus service file + ansible.builtin.copy: content: "{{ dbus_ganesha_file.stdout }}" dest: /etc/dbus-1/system.d/org.ganesha.nfsd.conf owner: "root" group: "root" mode: "0644" - - name: reload dbus configuration - command: "killall -SIGHUP dbus-daemon" - when: ceph_nfs_dynamic_exports | bool + - name: Reload dbus configuration + ansible.builtin.command: "killall -SIGHUP dbus-daemon" + changed_when: false diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml index 12e50874a9..b3d58d3c81 100644 --- a/roles/ceph-nfs/tasks/pre_requisite_non_container.yml +++ b/roles/ceph-nfs/tasks/pre_requisite_non_container.yml @@ -1,16 +1,16 @@ --- -- name: include red hat based system related tasks - include_tasks: pre_requisite_non_container_red_hat.yml +- name: Include red hat based system related tasks + ansible.builtin.include_tasks: pre_requisite_non_container_red_hat.yml when: ansible_facts['os_family'] == 'RedHat' -- name: include debian based system related tasks - include_tasks: pre_requisite_non_container_debian.yml +- name: Include debian based system related tasks + ansible.builtin.include_tasks: pre_requisite_non_container_debian.yml when: ansible_facts['os_family'] == 'Debian' -- name: install nfs rgw/cephfs gateway - SUSE/openSUSE - zypper: +- name: Install nfs rgw/cephfs gateway - SUSE/openSUSE + community.general.zypper: name: "{{ item.name }}" - disable_gpg_check: yes + disable_gpg_check: true with_items: - { name: 'nfs-ganesha-rgw', install: "{{ nfs_obj_gw }}" } - { name: 'radosgw', install: "{{ nfs_obj_gw }}" } @@ -25,8 +25,8 @@ # NOTE (leseb): we use root:ceph for permissions since ganesha # does not have the right selinux context to read ceph directories. 
-- name: create rados gateway and ganesha directories - file: +- name: Create rados gateway and ganesha directories + ansible.builtin.file: path: "{{ item.name }}" state: directory owner: "{{ item.owner | default('ceph') }}" @@ -42,12 +42,12 @@ - { name: "/var/run/ceph", create: true } when: item.create | bool -- name: cephx related tasks +- name: Cephx related tasks when: - cephx | bool - groups.get(mon_group_name, []) | length > 0 block: - - name: get keys from monitors + - name: Get keys from monitors ceph_key: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -64,8 +64,8 @@ - item.copy_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: copy ceph key(s) if needed - copy: + - name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "{{ item.item.path }}" content: "{{ item.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -77,10 +77,10 @@ - item.item.copy_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: nfs object gateway related tasks + - name: Nfs object gateway related tasks when: nfs_obj_gw | bool block: - - name: create rados gateway keyring + - name: Create rados gateway keyring ceph_key: name: "client.rgw.{{ ansible_facts['hostname'] }}" cluster: "{{ cluster }}" @@ -94,4 +94,4 @@ owner: ceph group: ceph mode: "{{ ceph_keyring_permissions }}" - no_log: "{{ no_log_on_ceph_key_tasks }}" \ No newline at end of file + no_log: "{{ no_log_on_ceph_key_tasks }}" diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml index 9138f71197..d34ef89efd 100644 --- a/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml +++ b/roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml @@ -1,107 +1,110 @@ --- -- name: debian based systems - repo handling +- name: Debian based systems - repo handling when: ceph_origin == 'repository' block: - - name: stable repos specific tasks + - name: Stable repos specific tasks when: - nfs_ganesha_stable | bool - ceph_repository == 'community' block: - - name: add nfs-ganesha stable repository - apt_repository: + - name: Add nfs-ganesha stable repository + ansible.builtin.apt_repository: repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main" state: present - update_cache: no + update_cache: false register: add_ganesha_apt_repo - - name: add libntirpc stable repository - apt_repository: + - name: Add libntirpc stable repository + ansible.builtin.apt_repository: repo: "deb {{ libntirpc_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main" state: present - update_cache: no + update_cache: false register: add_libntirpc_apt_repo when: libntirpc_stable_deb_repo is defined - - name: add nfs-ganesha ppa apt key - apt_key: + - name: Add nfs-ganesha ppa apt key + ansible.builtin.apt_key: keyserver: "{{ nfs_ganesha_apt_keyserver }}" id: "{{ nfs_ganesha_apt_key_id }}" when: - nfs_ganesha_apt_key_id is defined - nfs_ganesha_apt_keyserver is defined - - name: update apt cache - apt: - update_cache: yes + - name: Update apt cache + ansible.builtin.apt: + update_cache: true register: update_ganesha_apt_cache retries: 5 delay: 2 until: update_ganesha_apt_cache is success when: add_ganesha_apt_repo is changed or add_libntirpc_apt_repo is changed - - name: debian based systems - dev repos specific tasks + - name: Debian based systems - dev repos specific tasks when: - nfs_ganesha_dev | bool - 
ceph_repository == 'dev' block: - - name: fetch nfs-ganesha development repository - uri: + - name: Fetch nfs-ganesha development repository + ansible.builtin.uri: url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}" - return_content: yes + return_content: true register: nfs_ganesha_dev_apt_repo - - name: add nfs-ganesha development repository - copy: + - name: Add nfs-ganesha development repository + ansible.builtin.copy: content: "{{ nfs_ganesha_dev_apt_repo.content }}" dest: /etc/apt/sources.list.d/nfs-ganesha-dev.list owner: root group: root - backup: yes + backup: true + mode: "0644" -- name: debain based systems - install required packages +- name: Debian based systems - install required packages block: - - name: debian based systems- non-rhcs installation + - name: Debian based systems - non-rhcs installation when: - (ceph_origin == 'repository' or ceph_origin == 'distro') - ceph_repository != 'rhcs' block: - - name: install nfs rgw/cephfs gateway - debian - apt: + - name: Install nfs rgw/cephfs gateway - debian + ansible.builtin.apt: name: ['nfs-ganesha-rgw', 'radosgw'] - allow_unauthenticated: yes + allow_unauthenticated: true register: result until: result is succeeded when: nfs_obj_gw | bool - - name: install nfs rgw/cephfs gateway - debian - apt: + - name: Install nfs rgw/cephfs gateway - debian + ansible.builtin.apt: name: nfs-ganesha-ceph - allow_unauthenticated: yes + allow_unauthenticated: true register: result until: result is succeeded when: nfs_file_gw | bool - - name: debian based systems - rhcs installation + - name: Debian based systems - rhcs installation when: - (ceph_origin == 'repository' or ceph_origin == 'distro') - ceph_repository == 'rhcs' block: - - name: install red hat storage nfs gateway for debian - apt: + - name: Install red hat storage nfs gateway for debian + ansible.builtin.apt: name: nfs-ganesha - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded - - name: install red hat storage nfs file gateway - apt: + + - name: Install red hat storage nfs file gateway + ansible.builtin.apt: name: nfs-ganesha-ceph - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded when: nfs_file_gw | bool - - name: install red hat storage nfs obj gateway - apt: + + - name: Install red hat storage nfs obj gateway + ansible.builtin.apt: name: nfs-ganesha-rgw - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded when: nfs_obj_gw | bool diff --git a/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml b/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml index a57715c923..92a444822e 100644 --- a/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml +++ b/roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml @@ -1,42 +1,43 @@ --- -- name: red hat based systems - repo handling +- name: Red hat based systems - repo handling when: ceph_origin == 'repository' block: - - name: red hat based systems - stable repo related tasks + - name: Red hat based
systems - stable repo related tasks when: - nfs_ganesha_stable | bool - ceph_repository == 'community' block: - - name: add nfs-ganesha stable repository - package: + - name: Add nfs-ganesha stable repository + ansible.builtin.package: name: "{{ centos_release_nfs }}" state: present - - name: red hat based systems - dev repo related tasks + - name: Red hat based systems - dev repo related tasks + when: + - nfs_ganesha_dev | bool + - ceph_repository == 'dev' block: - - name: add nfs-ganesha dev repo - get_url: + - name: Add nfs-ganesha dev repo + ansible.builtin.get_url: url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}" dest: /etc/yum.repos.d/nfs-ganesha-dev.repo + mode: "0644" force: true - when: - - nfs_ganesha_dev | bool - - ceph_repository == 'dev' -- name: red hat based systems - install nfs packages +- name: Red hat based systems - install nfs packages block: - - name: install nfs cephfs gateway - package: + - name: Install nfs cephfs gateway + ansible.builtin.package: name: ['nfs-ganesha-ceph', 'nfs-ganesha-rados-grace'] - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded when: nfs_file_gw | bool - - name: install redhat nfs-ganesha-rgw and ceph-radosgw packages - package: + - name: Install redhat nfs-ganesha-rgw and ceph-radosgw packages + ansible.builtin.package: name: ['nfs-ganesha-rgw', 'nfs-ganesha-rados-grace', 'nfs-ganesha-rados-urls', 'ceph-radosgw'] - state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" + state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}" register: result until: result is succeeded when: nfs_obj_gw | bool diff --git a/roles/ceph-nfs/tasks/start_nfs.yml b/roles/ceph-nfs/tasks/start_nfs.yml index f7d6831386..45e7a26fdb 100644 --- a/roles/ceph-nfs/tasks/start_nfs.yml +++ b/roles/ceph-nfs/tasks/start_nfs.yml @@ -1,53 +1,55 @@ --- -- block: - - name: set_fact exec_cmd_nfs - external - set_fact: - exec_cmd_nfs: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }} -n client.{{ ceph_nfs_ceph_user }} -k /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring" - delegate_node: "{{ inventory_hostname }}" - when: groups.get(mon_group_name, []) | length == 0 +- name: Nfs various pre-requisites tasks + block: + - name: Set_fact exec_cmd_nfs - external + ansible.builtin.set_fact: + exec_cmd_nfs: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }} -n client.{{ ceph_nfs_ceph_user }} -k /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring" + delegate_node: "{{ inventory_hostname }}" + when: groups.get(mon_group_name, []) | length == 0 - - name: set_fact exec_cmd_nfs - internal - set_fact: - exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + 
hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados" - delegate_node: "{{ groups[mon_group_name][0] }}" - when: groups.get(mon_group_name, []) | length > 0 + - name: Set_fact exec_cmd_nfs - internal + ansible.builtin.set_fact: + exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados" + delegate_node: "{{ groups[mon_group_name][0] }}" + when: groups.get(mon_group_name, []) | length > 0 - - name: check if rados index object exists - shell: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}" - changed_when: false - failed_when: false - register: rados_index_exists - check_mode: no - when: ceph_nfs_rados_backend | bool - delegate_to: "{{ delegate_node }}" - run_once: true + - name: Check if rados index object exists + ansible.builtin.shell: "set -o pipefail && {{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls | grep {{ ceph_nfs_rados_export_index }}" + changed_when: false + failed_when: false + register: rados_index_exists + check_mode: false + when: ceph_nfs_rados_backend | bool + delegate_to: "{{ delegate_node }}" + run_once: true - - name: create an empty rados index object - command: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null" - when: - - ceph_nfs_rados_backend | bool - - rados_index_exists.rc != 0 - delegate_to: "{{ delegate_node }}" - run_once: true + - name: Create an empty rados index object + ansible.builtin.command: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null" + when: + - ceph_nfs_rados_backend | bool + - rados_index_exists.rc != 0 + delegate_to: "{{ delegate_node }}" + changed_when: false + run_once: true -- name: create /etc/ganesha - file: +- name: Create /etc/ganesha + ansible.builtin.file: path: /etc/ganesha state: directory owner: root group: root mode: "0755" -- name: generate ganesha configuration file - template: +- name: Generate ganesha configuration file + ansible.builtin.template: src: "ganesha.conf.j2" dest: /etc/ganesha/ganesha.conf owner: "root" group: "root" mode: "0644" - notify: restart ceph nfss + notify: Restart ceph nfss -- name: generate ganesha idmap.conf file +- name: Generate ganesha idmap.conf file openstack.config_template.config_template: src: "idmap.conf.j2" dest: "{{ ceph_nfs_idmap_conf }}" @@ -56,10 +58,10 @@ mode: "0644" config_overrides: "{{ idmap_conf_overrides }}" config_type: ini - notify: restart ceph nfss + notify: Restart ceph nfss -- name: create exports directory - file: +- name: Create exports directory + ansible.builtin.file: path: /etc/ganesha/export.d state: directory owner: "root" @@ -67,37 +69,37 @@ mode: "0755" when: ceph_nfs_dynamic_exports | bool -- name: create exports dir index file - copy: +- name: Create exports dir index file + ansible.builtin.copy: content: "" - force: no + force: false dest: /etc/ganesha/export.d/INDEX.conf owner: "root" group: "root" mode: "0644" when: ceph_nfs_dynamic_exports | bool -- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml when: containerized_deployment | bool -- name: systemd start nfs container - systemd: +- name: Systemd 
start nfs container + ansible.builtin.systemd: name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} state: started - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true when: - containerized_deployment | bool - ceph_nfs_enable_service | bool -- name: start nfs gateway service - systemd: +- name: Start nfs gateway service + ansible.builtin.systemd: name: nfs-ganesha state: started - enabled: yes - masked: no + enabled: true + masked: false when: - not containerized_deployment | bool - ceph_nfs_enable_service | bool diff --git a/roles/ceph-nfs/tasks/systemd.yml b/roles/ceph-nfs/tasks/systemd.yml index 3a13e602d9..1534cf4fdf 100644 --- a/roles/ceph-nfs/tasks/systemd.yml +++ b/roles/ceph-nfs/tasks/systemd.yml @@ -1,9 +1,9 @@ --- -- name: generate systemd unit file - template: +- name: Generate systemd unit file + ansible.builtin.template: src: "{{ role_path }}/templates/ceph-nfs.service.j2" dest: /etc/systemd/system/ceph-nfs@.service owner: "root" group: "root" mode: "0644" - notify: restart ceph nfss \ No newline at end of file + notify: Restart ceph nfss diff --git a/roles/ceph-nfs/templates/systemd-run.j2 b/roles/ceph-nfs/templates/systemd-run.j2 new file mode 100644 index 0000000000..868cd19de3 --- /dev/null +++ b/roles/ceph-nfs/templates/systemd-run.j2 @@ -0,0 +1,27 @@ +#!/bin/sh +T=$1 +N=$2 + +# start nfs-ganesha +/usr/bin/{{ container_binary }} run --rm --net=host \ +{% if container_binary == 'podman' %} + -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \ +{% endif %} +--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \ +-v /var/lib/ceph:/var/lib/ceph:z \ +-v /etc/ceph:/etc/ceph:z \ +-v /var/lib/nfs/ganesha:/var/lib/nfs/ganesha:z \ +-v /etc/ganesha:/etc/ganesha:z \ +-v /var/run/ceph:/var/run/ceph:z \ +-v /var/log/ceph:/var/log/ceph:z \ +-v /var/log/ganesha:/var/log/ganesha:z \ +{% if ceph_nfs_dynamic_exports | bool %} +--privileged \ +-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \ +{% endif -%} +-v /etc/localtime:/etc/localtime:ro \ +{{ ceph_nfs_docker_extra_env }} \ +--entrypoint=/usr/bin/ganesha.nfsd \ +--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \ +{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \ +-F -L STDOUT "${GANESHA_EPOCH}" diff --git a/roles/ceph-node-exporter/meta/main.yml b/roles/ceph-node-exporter/meta/main.yml index 633df08e35..8357e97db0 100644 --- a/roles/ceph-node-exporter/meta/main.yml +++ b/roles/ceph-node-exporter/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Boris Ranto description: Configures Prometheus Node Exporter license: Apache - min_ansible_version: 2.4 + min_ansible_version: '2.4' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-node-exporter/tasks/main.yml b/roles/ceph-node-exporter/tasks/main.yml index c187076884..251ffe7594 100644 --- a/roles/ceph-node-exporter/tasks/main.yml +++ b/roles/ceph-node-exporter/tasks/main.yml @@ -1,3 +1,3 @@ --- -- name: include setup_container.yml - include_tasks: setup_container.yml +- name: Include setup_container.yml + ansible.builtin.include_tasks: setup_container.yml diff --git a/roles/ceph-node-exporter/tasks/setup_container.yml b/roles/ceph-node-exporter/tasks/setup_container.yml index a15c021604..7ab311df82 100644 --- a/roles/ceph-node-exporter/tasks/setup_container.yml +++ b/roles/ceph-node-exporter/tasks/setup_container.yml @@ -1,11 +1,11 @@ ---
-- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml -- name: start the node_exporter service - systemd: +- name: Start the node_exporter service + ansible.builtin.systemd: name: node_exporter state: started - enabled: yes - daemon_reload: yes + enabled: true + daemon_reload: true failed_when: false diff --git a/roles/ceph-node-exporter/tasks/systemd.yml b/roles/ceph-node-exporter/tasks/systemd.yml index 4df9920808..0581b54ba5 100644 --- a/roles/ceph-node-exporter/tasks/systemd.yml +++ b/roles/ceph-node-exporter/tasks/systemd.yml @@ -1,8 +1,8 @@ --- -- name: ship systemd service - template: +- name: Ship systemd service + ansible.builtin.template: src: node_exporter.service.j2 dest: "/etc/systemd/system/node_exporter.service" owner: root group: root - mode: 0644 + mode: "0644" diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml index 8eb1bebf39..37e2d9fa81 100644 --- a/roles/ceph-osd/defaults/main.yml +++ b/roles/ceph-osd/defaults/main.yml @@ -25,31 +25,31 @@ copy_admin_key: false # All scenario(except 3rd) inherit from the following device declaration # Note: This scenario uses the ceph-volume lvm batch method to provision OSDs -#devices: -# - /dev/sdb -# - /dev/sdc -# - /dev/sdd -# - /dev/sde +# devices: +# - /dev/sdb +# - /dev/sdc +# - /dev/sdd +# - /dev/sde devices: [] # Declare devices to be used as block.db devices -#dedicated_devices: -# - /dev/sdx -# - /dev/sdy +# dedicated_devices: +# - /dev/sdx +# - /dev/sdy dedicated_devices: [] # Declare devices to be used as block.wal devices -#bluestore_wal_devices: -# - /dev/nvme0n1 -# - /dev/nvme0n2 +# bluestore_wal_devices: +# - /dev/nvme0n1 +# - /dev/nvme0n2 bluestore_wal_devices: [] -#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above. +# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above. # Device discovery is based on the Ansible fact 'ansible_facts["devices"]' # which reports all the devices on a system. If chosen, all the disks # found will be passed to ceph-volume lvm batch. You should not be worried on using @@ -60,7 +60,7 @@ osd_auto_discovery: false # Encrypt your OSD device using dmcrypt # If set to True, no matter which osd_objecstore you use the data will be encrypted -dmcrypt: False +dmcrypt: false # Use ceph-volume to create OSDs from logical volumes. # lvm_volumes is a list of dictionaries. @@ -169,8 +169,8 @@ ceph_osd_docker_cpu_limit: 4 # NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16 # NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17 # then, the following would run the OSD on the first NUMA node only. -#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" -#ceph_osd_docker_cpuset_mems: "0" +# ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" +# ceph_osd_docker_cpuset_mems: "0" # PREPARE DEVICE # @@ -191,9 +191,9 @@ ceph_osd_numactl_opts: "" # ceph_osd_systemd_overrides will override the systemd settings # for the ceph-osd services.
# For example,to set "PrivateDevices=false" you can specify: -#ceph_osd_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_osd_systemd_overrides: +# Service: +# PrivateDevices: false ########### diff --git a/roles/ceph-osd/meta/main.yml b/roles/ceph-osd/meta/main.yml index 3c2a26e89c..a5eb83bf34 100644 --- a/roles/ceph-osd/meta/main.yml +++ b/roles/ceph-osd/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Installs Ceph Object Storage Daemon license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-osd/tasks/common.yml b/roles/ceph-osd/tasks/common.yml index ac690e46fa..59dbc8b4b5 100644 --- a/roles/ceph-osd/tasks/common.yml +++ b/roles/ceph-osd/tasks/common.yml @@ -1,6 +1,6 @@ --- -- name: create bootstrap-osd and osd directories - file: +- name: Create bootstrap-osd and osd directories + ansible.builtin.file: path: "{{ item }}" state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -11,7 +11,7 @@ - /var/lib/ceph/bootstrap-osd/ - /var/lib/ceph/osd/ -- name: get keys from monitors +- name: Get keys from monitors ceph_key: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -31,8 +31,8 @@ - cephx | bool - item.copy_key | bool -- name: copy ceph key(s) if needed - copy: +- name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "{{ item.item.path }}" content: "{{ item.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -44,4 +44,3 @@ - item is not skipped - item.item.copy_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" - diff --git a/roles/ceph-osd/tasks/crush_rules.yml b/roles/ceph-osd/tasks/crush_rules.yml index b2c25cee6b..5f103f3380 100644 --- a/roles/ceph-osd/tasks/crush_rules.yml +++ b/roles/ceph-osd/tasks/crush_rules.yml @@ -1,5 +1,5 @@ --- -- name: configure crush hierarchy +- name: Configure crush hierarchy ceph_crush: cluster: "{{ cluster }}" location: "{{ osd_crush_location }}" @@ -10,7 +10,7 @@ - hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(create_crush_tree) | bool - osd_crush_location is defined -- name: create configured crush rules +- name: Create configured crush rules ceph_crush_rule: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -25,7 +25,7 @@ delegate_to: '{{ groups[mon_group_name][0] }}' run_once: true -- name: get id for new default crush rule +- name: Get id for new default crush rule ceph_crush_rule: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -42,16 +42,16 @@ # If multiple rules are set as default (should not be) then the last one is taken as actual default. # the with_items statement overrides each iteration with the new one. 
# NOTE(leseb): we should actually fail if multiple rules are set as default -- name: set_fact info_ceph_default_crush_rule_yaml, ceph_osd_pool_default_crush_rule_name - set_fact: +- name: Set_fact info_ceph_default_crush_rule_yaml, ceph_osd_pool_default_crush_rule_name + ansible.builtin.set_fact: info_ceph_default_crush_rule_yaml: "{{ item.stdout | default('{}', True) | from_json() }}" ceph_osd_pool_default_crush_rule_name: "{{ (item.stdout | default('{}', True) | from_json).get('rule_name') }}" with_items: "{{ info_ceph_default_crush_rule.results }}" run_once: true when: not item.get('skipped', false) -- name: insert new default crush rule into daemon to prevent restart - command: "{{ hostvars[item]['container_exec_cmd'] | default('') }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[item]['monitor_name'] }}.asok config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}" +- name: Insert new default crush rule into daemon to prevent restart + ansible.builtin.command: "{{ hostvars[item]['container_exec_cmd'] | default('') }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[item]['monitor_name'] }}.asok config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}" changed_when: false delegate_to: "{{ item }}" with_items: "{{ groups[mon_group_name] }}" @@ -59,12 +59,13 @@ when: - info_ceph_default_crush_rule_yaml | default('') | length > 0 -- name: "add new default crush rule to {{ cluster }}.conf" - ini_file: +- name: Add new default crush rule to ceph config file + community.general.ini_file: dest: "/etc/ceph/{{ cluster }}.conf" section: "global" option: "osd pool default crush rule" value: "{{ info_ceph_default_crush_rule_yaml.rule_id }}" + mode: "0644" delegate_to: "{{ item }}" with_items: "{{ groups[mon_group_name] }}" run_once: true diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index 9159b45191..f51fc98459 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -1,10 +1,10 @@ --- -- name: set_fact add_osd - set_fact: +- name: Set_fact add_osd + ansible.builtin.set_fact: add_osd: "{{ groups[osd_group_name] | length != ansible_play_hosts_all | length }}" -- name: set_fact container_exec_cmd - set_fact: +- name: Set_fact container_exec_cmd + ansible.builtin.set_fact: container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}" with_items: "{{ groups.get(mon_group_name, []) }}" delegate_to: "{{ item }}" @@ -12,11 +12,11 @@ run_once: true when: containerized_deployment | bool -- name: include_tasks system_tuning.yml - include_tasks: system_tuning.yml +- name: Include_tasks system_tuning.yml + ansible.builtin.include_tasks: system_tuning.yml -- name: install dependencies - package: +- name: Install dependencies + ansible.builtin.package: name: parted state: present register: result @@ -25,8 +25,8 @@ - not containerized_deployment | bool - ansible_facts['os_family'] != 'ClearLinux' -- name: install numactl when needed - package: +- name: Install numactl when needed + ansible.builtin.package: name: numactl register: result until: result is succeeded @@ -35,10 +35,10 @@ - ceph_osd_numactl_opts | length > 0 tags: with_pkg -- name: include_tasks common.yml - include_tasks: common.yml +- name: Include_tasks common.yml + ansible.builtin.include_tasks: common.yml -- name: set noup flag +- name: Set noup flag ceph_osd_flag: name: noup cluster: "{{ cluster }}" @@ -46,27 +46,27 @@ CEPH_CONTAINER_IMAGE: "{{ 
ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" delegate_to: "{{ groups[mon_group_name][0] }}" - run_once: True + run_once: true when: - not rolling_update | default(False) | bool - not switch_to_containers | default(False) | bool -- name: include_tasks scenarios/lvm.yml - include_tasks: scenarios/lvm.yml +- name: Include_tasks scenarios/lvm.yml + ansible.builtin.include_tasks: scenarios/lvm.yml when: - lvm_volumes|length > 0 - not rolling_update|default(False) | bool -- name: include_tasks scenarios/lvm-batch.yml - include_tasks: scenarios/lvm-batch.yml +- name: Include_tasks scenarios/lvm-batch.yml + ansible.builtin.include_tasks: scenarios/lvm-batch.yml when: - devices|length > 0 - not rolling_update|default(False) | bool -- name: include_tasks start_osds.yml - include_tasks: start_osds.yml +- name: Include_tasks start_osds.yml + ansible.builtin.include_tasks: start_osds.yml -- name: unset noup flag +- name: Unset noup flag ceph_osd_flag: name: noup cluster: "{{ cluster }}" @@ -80,8 +80,8 @@ - not switch_to_containers | default(False) | bool - inventory_hostname == ansible_play_hosts_all | last -- name: wait for all osd to be up - command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd stat -f json" +- name: Wait for all osd to be up + ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd stat -f json" register: wait_for_all_osds_up retries: "{{ nb_retry_wait_osd_up }}" delay: "{{ delay_wait_osd_up }}" @@ -95,14 +95,14 @@ - inventory_hostname == ansible_play_hosts_all | last tags: wait_all_osds_up -- name: include crush_rules.yml - include_tasks: crush_rules.yml +- name: Include crush_rules.yml + ansible.builtin.include_tasks: crush_rules.yml when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool tags: wait_all_osds_up # Create the pools listed in openstack_pools -- name: include openstack_config.yml - include_tasks: openstack_config.yml +- name: Include openstack_config.yml + ansible.builtin.include_tasks: openstack_config.yml when: - not add_osd | bool - not rolling_update | default(False) | bool diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml index 6b05d2c3a3..6b276c2c0f 100644 --- a/roles/ceph-osd/tasks/openstack_config.yml +++ b/roles/ceph-osd/tasks/openstack_config.yml @@ -1,7 +1,7 @@ --- -- name: pool related tasks +- name: Pool related tasks block: - - name: create openstack pool(s) + - name: Create openstack pool(s) ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -21,9 +21,12 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" -- name: create openstack cephx key(s) +- name: Create openstack cephx key(s) + when: + - cephx | bool + - openstack_config | bool block: - - name: generate keys + - name: Generate keys ceph_key: name: "{{ item.name }}" caps: "{{ item.caps }}" @@ -37,7 +40,7 @@ delegate_to: "{{ groups[mon_group_name][0] }}" no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: get keys from monitors + - name: Get keys from monitors ceph_key: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -51,8 +54,8 @@ delegate_to: "{{ 
groups.get(mon_group_name)[0] }}" no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: copy ceph key(s) if needed - copy: + - name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "/etc/ceph/{{ cluster }}.{{ item.0.item.name }}.keyring" content: "{{ item.0.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -63,6 +66,3 @@ - "{{ groups[mon_group_name] }}" delegate_to: "{{ item.1 }}" no_log: "{{ no_log_on_ceph_key_tasks }}" - when: - - cephx | bool - - openstack_config | bool diff --git a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml index 52cc388351..a105312536 100644 --- a/roles/ceph-osd/tasks/scenarios/lvm-batch.yml +++ b/roles/ceph-osd/tasks/scenarios/lvm-batch.yml @@ -1,12 +1,12 @@ --- -- name: "use ceph-volume lvm batch to create {{ osd_objectstore }} osds" +- name: Use ceph-volume lvm batch to create osds ceph_volume: cluster: "{{ cluster }}" objectstore: "{{ osd_objectstore }}" batch_devices: "{{ _devices }}" - dmcrypt: "{{ dmcrypt|default(omit) }}" - crush_device_class: "{{ crush_device_class|default(omit) }}" + dmcrypt: "{{ dmcrypt | default(omit) }}" + crush_device_class: "{{ crush_device_class | default(omit) }}" osds_per_device: "{{ osds_per_device }}" block_db_size: "{{ block_db_size }}" block_db_devices: "{{ dedicated_devices | unique if dedicated_devices | length > 0 else omit }}" diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml index 2c19effbe4..68ade3640e 100644 --- a/roles/ceph-osd/tasks/scenarios/lvm.yml +++ b/roles/ceph-osd/tasks/scenarios/lvm.yml @@ -1,16 +1,16 @@ --- -- name: "use ceph-volume to create {{ osd_objectstore }} osds" +- name: Use ceph-volume to create osds ceph_volume: cluster: "{{ cluster }}" objectstore: "{{ osd_objectstore }}" data: "{{ item.data }}" - data_vg: "{{ item.data_vg|default(omit) }}" - db: "{{ item.db|default(omit) }}" - db_vg: "{{ item.db_vg|default(omit) }}" - wal: "{{ item.wal|default(omit) }}" - wal_vg: "{{ item.wal_vg|default(omit) }}" + data_vg: "{{ item.data_vg | default(omit) }}" + db: "{{ item.db | default(omit) }}" + db_vg: "{{ item.db_vg | default(omit) }}" + wal: "{{ item.wal | default(omit) }}" + wal_vg: "{{ item.wal_vg | default(omit) }}" crush_device_class: "{{ item.crush_device_class | default(crush_device_class) | default(omit) }}" - dmcrypt: "{{ dmcrypt|default(omit) }}" + dmcrypt: "{{ dmcrypt | default(omit) }}" action: "{{ 'prepare' if containerized_deployment | bool else 'create' }}" environment: CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}" diff --git a/roles/ceph-osd/tasks/start_osds.yml b/roles/ceph-osd/tasks/start_osds.yml index 6a9dd46c66..8dfb1d4f18 100644 --- a/roles/ceph-osd/tasks/start_osds.yml +++ b/roles/ceph-osd/tasks/start_osds.yml @@ -1,14 +1,14 @@ --- # this is for ceph-disk, the ceph-disk command is gone so we have to list /var/lib/ceph -- name: get osd ids - shell: ls /var/lib/ceph/osd/ | sed 's/.*-//' # noqa 306 +- name: Get osd ids + ansible.builtin.shell: ls /var/lib/ceph/osd/ | sed 's/.*-//' # noqa risky-shell-pipe args: executable: /bin/bash changed_when: false failed_when: false register: osd_ids_non_container -- name: collect osd ids +- name: Collect osd ids ceph_volume: cluster: "{{ cluster }}" action: list @@ -17,19 +17,20 @@ CEPH_CONTAINER_BINARY: "{{ container_binary }}" register: ceph_osd_ids -- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml when: 
containerized_deployment | bool -- name: ensure systemd service override directory exists - file: +- name: Ensure systemd service override directory exists + ansible.builtin.file: state: directory path: "/etc/systemd/system/ceph-osd@.service.d/" + mode: "0755" when: - ceph_osd_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' -- name: add ceph-osd systemd service overrides +- name: Add ceph-osd systemd service overrides openstack.config_template.config_template: src: "ceph-osd.service.d-overrides.j2" dest: "/etc/systemd/system/ceph-osd@.service.d/ceph-osd-systemd-overrides.conf" @@ -39,8 +40,8 @@ - ceph_osd_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' -- name: ensure "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" is present - file: +- name: Ensure /var/lib/ceph/osd/- is present + ansible.builtin.file: state: directory path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" mode: "{{ ceph_directories_mode }}" @@ -48,8 +49,8 @@ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}" -- name: write /var/lib/ceph/osd/{{ cluster }}-{{ osd_id }}/run - template: +- name: Write run file in /var/lib/ceph/osd/xxxx/run + ansible.builtin.template: src: systemd-run.j2 dest: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}/run" mode: "0700" @@ -58,11 +59,11 @@ with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}" when: containerized_deployment | bool -- name: systemd start osd - systemd: +- name: Systemd start osd + ansible.builtin.systemd: name: ceph-osd@{{ item }} state: started - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}" diff --git a/roles/ceph-osd/tasks/system_tuning.yml b/roles/ceph-osd/tasks/system_tuning.yml index e422b97096..4dae81b11c 100644 --- a/roles/ceph-osd/tasks/system_tuning.yml +++ b/roles/ceph-osd/tasks/system_tuning.yml @@ -1,6 +1,6 @@ --- -- name: create tmpfiles.d directory - file: +- name: Create tmpfiles.d directory + ansible.builtin.file: path: "/etc/tmpfiles.d" state: "directory" owner: "root" @@ -9,8 +9,8 @@ register: "tmpfiles_d" when: disable_transparent_hugepage | bool -- name: disable transparent hugepage - template: +- name: Disable transparent hugepage + ansible.builtin.template: src: "tmpfiles_hugepage.j2" dest: "/etc/tmpfiles.d/ceph_transparent_hugepage.conf" group: "root" @@ -20,23 +20,23 @@ validate: "systemd-tmpfiles --create %s" when: disable_transparent_hugepage | bool -- name: get default vm.min_free_kbytes - slurp: +- name: Get default vm.min_free_kbytes + ansible.builtin.slurp: src: /proc/sys/vm/min_free_kbytes register: default_vm_min_free_kbytes -- name: set_fact vm_min_free_kbytes - set_fact: +- name: Set_fact vm_min_free_kbytes + ansible.builtin.set_fact: vm_min_free_kbytes: "{{ 4194303 if ansible_facts['memtotal_mb'] >= 49152 else default_vm_min_free_kbytes.content | b64decode | trim }}" -- name: apply operating system tuning - sysctl: +- name: Apply operating system tuning + ansible.posix.sysctl: name: "{{ item.name }}" value: "{{ item.value }}" state: present sysctl_file: /etc/sysctl.d/ceph-tuning.conf - sysctl_set: yes - ignoreerrors: yes + sysctl_set: 
true + ignoreerrors: true with_items: - { name: "fs.aio-max-nr", value: "1048576", enable: "{{ osd_objectstore == 'bluestore' }}" } - "{{ os_tuning_params }}" diff --git a/roles/ceph-osd/tasks/systemd.yml b/roles/ceph-osd/tasks/systemd.yml index 6d4685bb65..3531cfc714 100644 --- a/roles/ceph-osd/tasks/systemd.yml +++ b/roles/ceph-osd/tasks/systemd.yml @@ -1,22 +1,23 @@ --- -- name: generate systemd unit file - template: +- name: Generate systemd unit file + ansible.builtin.template: src: "{{ role_path }}/templates/ceph-osd.service.j2" dest: /etc/systemd/system/ceph-osd@.service owner: "root" group: "root" mode: "0644" - notify: restart ceph osds + notify: Restart ceph osds -- name: generate systemd ceph-osd target file - copy: +- name: Generate systemd ceph-osd target file + ansible.builtin.copy: src: ceph-osd.target dest: /etc/systemd/system/ceph-osd.target + mode: "0644" when: containerized_deployment | bool -- name: enable ceph-osd.target - service: +- name: Enable ceph-osd.target + ansible.builtin.service: name: ceph-osd.target - enabled: yes - daemon_reload: yes - when: containerized_deployment | bool \ No newline at end of file + enabled: true + daemon_reload: true + when: containerized_deployment | bool diff --git a/roles/ceph-prometheus/files/ceph_dashboard.yml b/roles/ceph-prometheus/files/ceph_dashboard.yml index 0c95d4daff..8a7b68e9a4 100644 --- a/roles/ceph-prometheus/files/ceph_dashboard.yml +++ b/roles/ceph-prometheus/files/ceph_dashboard.yml @@ -1,115 +1,115 @@ groups: -- name: dashboard - rules: - - alert: Ceph Health Warning - expr: ceph_health_status == 1 - for: 1m - labels: - severity: page - annotations: - summary: "Ceph Health Warning" - description: "Overall Ceph Health" - - alert: Ceph Health Error - expr: ceph_health_status > 1 - for: 1m - labels: - severity: page - annotations: - summary: "Ceph Health Error" - description: "The Ceph cluster health is in an error state" - - alert: Disk(s) Near Full - expr: (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes) * 100 > 85 - for: 1m - labels: - severity: page - annotations: - summary: "Disk(s) Near Full" - description: "This shows how many disks are at or above 85% full. Performance may degrade beyond this threshold on filestore (XFS) backed OSD's." - - alert: OSD(s) Down - expr: ceph_osd_up < 0.5 - for: 1m - labels: - severity: page - annotations: - summary: "OSD(s) Down" - description: "This indicates that one or more OSDs is currently marked down in the cluster." - - alert: OSD Host(s) Down - expr: count by(instance) (ceph_disk_occupation * on(ceph_daemon) group_right(instance) ceph_osd_up == 0) - count by(instance) (ceph_disk_occupation) == 0 - for: 1m - labels: - severity: page - annotations: - summary: "OSD Host(s) Down" - description: "This indicates that one or more OSD hosts is currently down in the cluster." - - alert: PG(s) Stuck - expr: max(ceph_osd_numpg) > scalar(ceph_pg_active) - for: 1m - labels: - severity: page - annotations: - summary: "PG(s) Stuck" - description: "This indicates there are pg's in a stuck state, manual intervention needed to resolve." - - alert: OSD Host Loss Check - expr: max(sum(ceph_osd_stat_bytes - ceph_osd_stat_bytes_used)) * 0.9 < scalar(max(sum by (instance) (ceph_osd_stat_bytes + on (ceph_daemon) group_left (instance) (ceph_disk_occupation*0)))) - for: 1m - labels: - severity: page - annotations: - summary: "OSD Host Loss Check" - description: "This indicates that the cluster @ 90% full is not enough to support the loss of the largest OSD host." 
- - alert: Slow OSD Responses - expr: ((irate(node_disk_read_time_seconds_total[5m]) / clamp_min(irate(node_disk_reads_completed_total[5m]), 1) + irate(node_disk_write_time_seconds_total[5m]) / clamp_min(irate(node_disk_writes_completed_total[5m]), 1)) and on (instance, device) ceph_disk_occupation) > 1 - for: 1m - labels: - severity: page - annotations: - summary: "Slow OSD Responses" - description: "This indicates that some OSD Latencies are above 1s." - - alert: Network Errors - expr: sum by (instance, device) (irate(node_network_receive_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_receive_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m])) > 10 - for: 1m - labels: - severity: page - annotations: - summary: "Network Errors" - description: "This indicates that more than 10 dropped/error packets are seen in a 5m interval" - - alert: Pool Capacity Low - expr: (ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail) * 100 + on (pool_id) group_left (name) (ceph_pool_metadata*0)) > 85 - for: 1m - labels: - severity: page - annotations: - summary: "Pool Capacity Low" - description: "This indicates a low capacity in a pool." - - alert: MON(s) Down - expr: ceph_mon_quorum_status != 1 - for: 1m - labels: - severity: page - annotations: - summary: "MON(s) down" - description: "This indicates that one or more MON(s) is down." - - alert: Cluster Capacity Low - expr: sum(ceph_osd_stat_bytes_used) / sum(ceph_osd_stat_bytes) > 0.85 - for: 1m - labels: - severity: page - annotations: - summary: "Cluster Capacity Low" - description: "This indicates raw used space crosses the 85% capacity threshold of the ceph cluster." - - alert: OSD(s) with High PG Count - expr: ceph_osd_numpg > 275 - for: 1m - labels: - severity: page - annotations: - summary: "OSD(s) with High PG Count" - description: "This indicates there are some OSDs with high PG count (275+)." - - alert: Slow OSD Ops - expr: ceph_healthcheck_slow_ops > 0 - for: 1m - labels: - severity: page - annotations: - summary: "Slow OSD Ops" - description: "OSD requests are taking too long to process (osd_op_complaint_time exceeded)" + - name: Dashboard + rules: + - alert: Ceph Health Warning + expr: ceph_health_status == 1 + for: 1m + labels: + severity: page + annotations: + summary: "Ceph Health Warning" + description: "Overall Ceph Health" + - alert: Ceph Health Error + expr: ceph_health_status > 1 + for: 1m + labels: + severity: page + annotations: + summary: "Ceph Health Error" + description: "The Ceph cluster health is in an error state" + - alert: Disk(s) Near Full + expr: (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes) * 100 > 85 + for: 1m + labels: + severity: page + annotations: + summary: "Disk(s) Near Full" + description: "This shows how many disks are at or above 85% full. Performance may degrade beyond this threshold on filestore (XFS) backed OSD's." + - alert: OSD(s) Down + expr: ceph_osd_up < 0.5 + for: 1m + labels: + severity: page + annotations: + summary: "OSD(s) Down" + description: "This indicates that one or more OSDs is currently marked down in the cluster." 
+ - alert: OSD Host(s) Down + expr: count by(instance) (ceph_disk_occupation * on(ceph_daemon) group_right(instance) ceph_osd_up == 0) - count by(instance) (ceph_disk_occupation) == 0 + for: 1m + labels: + severity: page + annotations: + summary: "OSD Host(s) Down" + description: "This indicates that one or more OSD hosts is currently down in the cluster." + - alert: PG(s) Stuck + expr: max(ceph_osd_numpg) > scalar(ceph_pg_active) + for: 1m + labels: + severity: page + annotations: + summary: "PG(s) Stuck" + description: "This indicates there are pg's in a stuck state, manual intervention needed to resolve." + - alert: OSD Host Loss Check + expr: max(sum(ceph_osd_stat_bytes - ceph_osd_stat_bytes_used)) * 0.9 < scalar(max(sum by (instance) (ceph_osd_stat_bytes + on (ceph_daemon) group_left (instance) (ceph_disk_occupation*0)))) + for: 1m + labels: + severity: page + annotations: + summary: "OSD Host Loss Check" + description: "This indicates that the cluster @ 90% full is not enough to support the loss of the largest OSD host." + - alert: Slow OSD Responses + expr: ((irate(node_disk_read_time_seconds_total[5m]) / clamp_min(irate(node_disk_reads_completed_total[5m]), 1) + irate(node_disk_write_time_seconds_total[5m]) / clamp_min(irate(node_disk_writes_completed_total[5m]), 1)) and on (instance, device) ceph_disk_occupation) > 1 + for: 1m + labels: + severity: page + annotations: + summary: "Slow OSD Responses" + description: "This indicates that some OSD Latencies are above 1s." + - alert: Network Errors + expr: sum by (instance, device) (irate(node_network_receive_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_receive_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m])) > 10 + for: 1m + labels: + severity: page + annotations: + summary: "Network Errors" + description: "This indicates that more than 10 dropped/error packets are seen in a 5m interval" + - alert: Pool Capacity Low + expr: (ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail) * 100 + on (pool_id) group_left (name) (ceph_pool_metadata*0)) > 85 + for: 1m + labels: + severity: page + annotations: + summary: "Pool Capacity Low" + description: "This indicates a low capacity in a pool." + - alert: MON(s) Down + expr: ceph_mon_quorum_status != 1 + for: 1m + labels: + severity: page + annotations: + summary: "MON(s) down" + description: "This indicates that one or more MON(s) is down." + - alert: Cluster Capacity Low + expr: sum(ceph_osd_stat_bytes_used) / sum(ceph_osd_stat_bytes) > 0.85 + for: 1m + labels: + severity: page + annotations: + summary: "Cluster Capacity Low" + description: "This indicates raw used space crosses the 85% capacity threshold of the ceph cluster." + - alert: OSD(s) with High PG Count + expr: ceph_osd_numpg > 275 + for: 1m + labels: + severity: page + annotations: + summary: "OSD(s) with High PG Count" + description: "This indicates there are some OSDs with high PG count (275+)." 
+ - alert: Slow OSD Ops + expr: ceph_healthcheck_slow_ops > 0 + for: 1m + labels: + severity: page + annotations: + summary: "Slow OSD Ops" + description: "OSD requests are taking too long to process (osd_op_complaint_time exceeded)" diff --git a/roles/ceph-prometheus/handlers/main.yml b/roles/ceph-prometheus/handlers/main.yml index dca87676aa..8aa7e06d3a 100644 --- a/roles/ceph-prometheus/handlers/main.yml +++ b/roles/ceph-prometheus/handlers/main.yml @@ -1,8 +1,8 @@ --- -- name: service handler +- name: Service handler # We use the systemd module here so we can use the daemon_reload feature, # since we're shipping the .service file ourselves - systemd: + ansible.builtin.systemd: name: "{{ item }}" daemon_reload: true enabled: true diff --git a/roles/ceph-prometheus/meta/main.yml b/roles/ceph-prometheus/meta/main.yml index fc0b1e2803..b88a5db608 100644 --- a/roles/ceph-prometheus/meta/main.yml +++ b/roles/ceph-prometheus/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Boris Ranto description: Configures Prometheus for Ceph Dashboard license: Apache - min_ansible_version: 2.4 + min_ansible_version: '2.4' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-prometheus/tasks/main.yml b/roles/ceph-prometheus/tasks/main.yml index ea43aceb66..7206da2f17 100644 --- a/roles/ceph-prometheus/tasks/main.yml +++ b/roles/ceph-prometheus/tasks/main.yml @@ -1,60 +1,63 @@ --- -- name: create prometheus directories - file: +- name: Create prometheus directories + ansible.builtin.file: path: "{{ item }}" state: directory owner: "{{ prometheus_user_id }}" group: "{{ prometheus_user_id }}" + mode: "0755" with_items: - - "{{ prometheus_conf_dir }}" - - "{{ prometheus_data_dir }}" + - "{{ prometheus_conf_dir }}" + - "{{ prometheus_data_dir }}" -- name: write prometheus config file +- name: Write prometheus config file openstack.config_template.config_template: src: prometheus.yml.j2 dest: "{{ prometheus_conf_dir }}/prometheus.yml" owner: "{{ prometheus_user_id }}" group: "{{ prometheus_user_id }}" - mode: 0640 + mode: "0640" config_type: yaml config_overrides: "{{ prometheus_conf_overrides }}" - notify: service handler + notify: Service handler -- name: make sure the alerting rules directory exists - file: +- name: Make sure the alerting rules directory exists + ansible.builtin.file: path: "/etc/prometheus/alerting/" state: directory owner: "{{ prometheus_user_id }}" group: "{{ prometheus_user_id }}" + mode: "0755" -- name: copy alerting rules - copy: +- name: Copy alerting rules + ansible.builtin.copy: src: "ceph_dashboard.yml" dest: "/etc/prometheus/alerting/ceph_dashboard.yml" owner: "{{ prometheus_user_id }}" group: "{{ prometheus_user_id }}" - mode: 0644 + mode: "0644" -- name: create alertmanager directories - file: +- name: Create alertmanager directories + ansible.builtin.file: path: "{{ item }}" state: directory owner: "{{ prometheus_user_id }}" group: "{{ prometheus_user_id }}" + mode: "0755" with_items: - - "{{ alertmanager_conf_dir }}" - - "{{ alertmanager_data_dir }}" + - "{{ alertmanager_conf_dir }}" + - "{{ alertmanager_data_dir }}" -- name: write alertmanager config file +- name: Write alertmanager config file openstack.config_template.config_template: src: alertmanager.yml.j2 dest: "{{ alertmanager_conf_dir }}/alertmanager.yml" owner: "{{ prometheus_user_id }}" group: "{{ prometheus_user_id }}" - mode: 0640 + mode: "0640" config_type: yaml config_overrides: "{{ alertmanager_conf_overrides }}" - notify: service handler + notify: 
Service handler -- name: include setup_container.yml - include_tasks: setup_container.yml +- name: Include setup_container.yml + ansible.builtin.include_tasks: setup_container.yml diff --git a/roles/ceph-prometheus/tasks/setup_container.yml b/roles/ceph-prometheus/tasks/setup_container.yml index 25eec6e961..b2034adfb4 100644 --- a/roles/ceph-prometheus/tasks/setup_container.yml +++ b/roles/ceph-prometheus/tasks/setup_container.yml @@ -1,9 +1,9 @@ --- -- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml -- name: start prometheus services - systemd: +- name: Start prometheus services + ansible.builtin.systemd: name: "{{ item }}" daemon_reload: true enabled: true diff --git a/roles/ceph-prometheus/tasks/systemd.yml b/roles/ceph-prometheus/tasks/systemd.yml index eb587ac62b..b35cad3643 100644 --- a/roles/ceph-prometheus/tasks/systemd.yml +++ b/roles/ceph-prometheus/tasks/systemd.yml @@ -1,12 +1,12 @@ --- -- name: ship systemd services - template: +- name: Ship systemd services + ansible.builtin.template: src: "{{ item }}.j2" dest: "/etc/systemd/system/{{ item }}" owner: root group: root - mode: 0644 + mode: "0644" with_items: - 'alertmanager.service' - 'prometheus.service' - notify: service handler + notify: Service handler diff --git a/roles/ceph-rbd-mirror/defaults/main.yml b/roles/ceph-rbd-mirror/defaults/main.yml index ee6d73b047..0b5c885519 100644 --- a/roles/ceph-rbd-mirror/defaults/main.yml +++ b/roles/ceph-rbd-mirror/defaults/main.yml @@ -41,6 +41,6 @@ ceph_config_keys: [] # DON'T TOUCH ME # ceph_rbd_mirror_systemd_overrides will override the systemd settings # for the ceph-rbd-mirror services. # For example,to set "PrivateDevices=false" you can specify: -#ceph_rbd_mirror_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_rbd_mirror_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-rbd-mirror/meta/main.yml b/roles/ceph-rbd-mirror/meta/main.yml index c89a3e60ef..a5b1f2305f 100644 --- a/roles/ceph-rbd-mirror/meta/main.yml +++ b/roles/ceph-rbd-mirror/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Installs Ceph Mirror Agent license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-rbd-mirror/tasks/configure_mirroring.yml b/roles/ceph-rbd-mirror/tasks/configure_mirroring.yml index c83d17d416..f6c7100270 100644 --- a/roles/ceph-rbd-mirror/tasks/configure_mirroring.yml +++ b/roles/ceph-rbd-mirror/tasks/configure_mirroring.yml @@ -1,9 +1,9 @@ --- -- name: cephx tasks +- name: Cephx tasks when: - cephx | bool block: - - name: get client.bootstrap-rbd-mirror from ceph monitor + - name: Get client.bootstrap-rbd-mirror from ceph monitor ceph_key: name: client.bootstrap-rbd-mirror cluster: "{{ cluster }}" @@ -17,15 +17,16 @@ run_once: true no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: ensure /var/lib/ceph/bootstrap-rbd-mirror exists - file: + - name: Ensure /var/lib/ceph/bootstrap-rbd-mirror exists + ansible.builtin.file: path: /var/lib/ceph/bootstrap-rbd-mirror state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" + mode: "0750" - - name: copy ceph key(s) - copy: + - name: Copy ceph key(s) + ansible.builtin.copy: dest: "/var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring" content: "{{ 
_bootstrap_rbd_mirror_key.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -33,7 +34,7 @@ mode: "{{ ceph_keyring_permissions }}" no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: create rbd-mirror keyrings + - name: Create rbd-mirror keyrings ceph_key: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -60,7 +61,7 @@ dest: "/etc/ceph/{{ cluster }}.{{ ceph_rbd_mirror_local_user }}.keyring", secret: "{{ ceph_rbd_mirror_local_user_secret | default('') }}" } - - name: get "client.rbd-mirror.{{ ansible_facts['hostname'] }}" from ceph monitor + - name: Get client.rbd-mirror keyring from ceph monitor ceph_key: name: "client.rbd-mirror.{{ ansible_facts['hostname'] }}" cluster: "{{ cluster }}" @@ -73,8 +74,8 @@ delegate_to: "{{ groups.get(mon_group_name)[0] }}" no_log: "{{ no_log_on_ceph_key_tasks }}" - - name: copy ceph key - copy: + - name: Copy ceph key + ansible.builtin.copy: dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring" content: "{{ _rbd_mirror_key.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -82,24 +83,24 @@ mode: "{{ ceph_keyring_permissions }}" no_log: false -- name: start and add the rbd-mirror service instance - service: +- name: Start and add the rbd-mirror service instance + ansible.builtin.service: name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" state: started - enabled: yes - masked: no + enabled: true + masked: false changed_when: false when: - not containerized_deployment | bool - ceph_rbd_mirror_remote_user is defined -- name: set_fact ceph_rbd_mirror_pools - set_fact: +- name: Set_fact ceph_rbd_mirror_pools + ansible.builtin.set_fact: ceph_rbd_mirror_pools: - name: "{{ ceph_rbd_mirror_pool }}" when: ceph_rbd_mirror_pools is undefined -- name: create pool if it doesn't exist +- name: Create pool if it doesn't exist ceph_pool: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -119,8 +120,8 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" -- name: enable mirroring on the pool - command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool enable {{ item.name }} {{ ceph_rbd_mirror_mode }}" +- name: Enable mirroring on the pool + ansible.builtin.command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool enable {{ item.name }} {{ ceph_rbd_mirror_mode }}" register: result changed_when: false retries: 60 @@ -129,40 +130,41 @@ loop: "{{ ceph_rbd_mirror_pools }}" delegate_to: "{{ groups[mon_group_name][0] }}" -- name: add mirroring peer +- name: Add mirroring peer when: ceph_rbd_mirror_remote_user is defined block: - - name: list mirroring peer - command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool info {{ item.name }}" + - name: List mirroring peer + ansible.builtin.command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool info {{ item.name }}" changed_when: false register: mirror_peer loop: "{{ ceph_rbd_mirror_pools }}" delegate_to: "{{ groups[mon_group_name][0] }}" - - name: create a temporary file - tempfile: + - name: Create a temporary file + ansible.builtin.tempfile: path: /etc/ceph state: file suffix: _ceph-ansible register: tmp_file delegate_to: "{{ groups[mon_group_name][0] }}" - - name: write secret to temporary file - copy: + - name: Write secret to temporary file + ansible.builtin.copy: dest: "{{ tmp_file.path }}" content: "{{ ceph_rbd_mirror_remote_key }}" + mode: 
"0644" delegate_to: "{{ groups[mon_group_name][0] }}" - - name: add a mirroring peer - command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool peer add {{ item.item.name }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ceph_rbd_mirror_remote_mon_hosts }} --remote-key-file {{ tmp_file.path }}" + - name: Add a mirroring peer + ansible.builtin.command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool peer add {{ item.item.name }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ceph_rbd_mirror_remote_mon_hosts }} --remote-key-file {{ tmp_file.path }}" changed_when: false delegate_to: "{{ groups[mon_group_name][0] }}" loop: "{{ mirror_peer.results }}" run_once: true when: ceph_rbd_mirror_remote_user not in item.stdout - - name: rm temporary file - file: + - name: Rm temporary file + ansible.builtin.file: path: "{{ tmp_file.path }}" state: absent delegate_to: "{{ groups[mon_group_name][0] }}" diff --git a/roles/ceph-rbd-mirror/tasks/main.yml b/roles/ceph-rbd-mirror/tasks/main.yml index 63ee613055..9a51b0c943 100644 --- a/roles/ceph-rbd-mirror/tasks/main.yml +++ b/roles/ceph-rbd-mirror/tasks/main.yml @@ -1,26 +1,27 @@ --- -- name: non-containerized related tasks +- name: Non-containerized related tasks when: - not containerized_deployment | bool - ceph_rbd_mirror_remote_user is defined block: - - name: install dependencies - package: + - name: Install dependencies + ansible.builtin.package: name: rbd-mirror state: present register: result until: result is succeeded tags: package-install - - name: ensure systemd service override directory exists - file: + - name: Ensure systemd service override directory exists + ansible.builtin.file: state: directory path: "/etc/systemd/system/ceph-rbd-mirror@.service.d/" + mode: "0755" when: - ceph_rbd_mirror_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' - - name: add ceph-rbd-mirror systemd service overrides + - name: Add ceph-rbd-mirror systemd service overrides openstack.config_template.config_template: src: "ceph-rbd-mirror.service.d-overrides.j2" dest: "/etc/systemd/system/ceph-rbd-mirror@.service.d/ceph-rbd-mirror-systemd-overrides.conf" @@ -30,23 +31,23 @@ - ceph_rbd_mirror_systemd_overrides is defined - ansible_facts['service_mgr'] == 'systemd' - - name: enable ceph-rbd-mirror.target - systemd: + - name: Enable ceph-rbd-mirror.target + ansible.builtin.systemd: name: "ceph-rbd-mirror.target" state: started - enabled: yes - masked: no + enabled: true + masked: false changed_when: false -- name: set_fact ceph_cmd - set_fact: +- name: Set_fact ceph_cmd + ansible.builtin.set_fact: rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rbd' }}" -- name: include configure_mirroring.yml - include_tasks: configure_mirroring.yml +- name: Include configure_mirroring.yml + ansible.builtin.include_tasks: configure_mirroring.yml -- name: include start_container_rbd_mirror.yml - include_tasks: start_container_rbd_mirror.yml +- name: Include start_container_rbd_mirror.yml + ansible.builtin.include_tasks: start_container_rbd_mirror.yml when: - containerized_deployment | bool - ceph_rbd_mirror_remote_user is defined diff --git a/roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml 
b/roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml index c1ac8cd37c..51d80e1134 100644 --- a/roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml +++ b/roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml @@ -1,12 +1,12 @@ --- # Use systemd to manage container on Atomic host -- name: include_tasks systemd.yml - include_tasks: systemd.yml +- name: Include_tasks systemd.yml + ansible.builtin.include_tasks: systemd.yml -- name: systemd start rbd mirror container - systemd: +- name: Systemd start rbd mirror container + ansible.builtin.systemd: name: ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }} state: started - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true diff --git a/roles/ceph-rbd-mirror/tasks/systemd.yml b/roles/ceph-rbd-mirror/tasks/systemd.yml index 60977e6717..6e9d98734d 100644 --- a/roles/ceph-rbd-mirror/tasks/systemd.yml +++ b/roles/ceph-rbd-mirror/tasks/systemd.yml @@ -1,22 +1,23 @@ --- -- name: generate systemd unit file - template: +- name: Generate systemd unit file + ansible.builtin.template: src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2" dest: /etc/systemd/system/ceph-rbd-mirror@.service owner: "root" group: "root" mode: "0644" - notify: restart ceph rbdmirrors + notify: Restart ceph rbdmirrors -- name: generate systemd ceph-rbd-mirror target file - copy: +- name: Generate systemd ceph-rbd-mirror target file + ansible.builtin.copy: src: ceph-rbd-mirror.target dest: /etc/systemd/system/ceph-rbd-mirror.target + mode: "0644" when: containerized_deployment | bool -- name: enable ceph-rbd-mirror.target - service: +- name: Enable ceph-rbd-mirror.target + ansible.builtin.service: name: ceph-rbd-mirror.target - enabled: yes - daemon_reload: yes - when: containerized_deployment | bool \ No newline at end of file + enabled: true + daemon_reload: true + when: containerized_deployment | bool diff --git a/roles/ceph-rgw-loadbalancer/defaults/main.yml b/roles/ceph-rgw-loadbalancer/defaults/main.yml index 3db54395ca..256a4f14f5 100644 --- a/roles/ceph-rgw-loadbalancer/defaults/main.yml +++ b/roles/ceph-rgw-loadbalancer/defaults/main.yml @@ -18,9 +18,9 @@ haproxy_ssl_options: - no-tlsv11 - no-tls-tickets # -#virtual_ips: -# - 192.168.238.250 -# - 192.168.238.251 +# virtual_ips: +# - 192.168.238.250 +# - 192.168.238.251 # -#virtual_ip_netmask: 24 -#virtual_ip_interface: ens33 +# virtual_ip_netmask: 24 +# virtual_ip_interface: ens33 diff --git a/roles/ceph-rgw-loadbalancer/handlers/main.yml b/roles/ceph-rgw-loadbalancer/handlers/main.yml index b75d9339e0..a68c0e26e8 100644 --- a/roles/ceph-rgw-loadbalancer/handlers/main.yml +++ b/roles/ceph-rgw-loadbalancer/handlers/main.yml @@ -1,10 +1,10 @@ --- -- name: restart haproxy - service: +- name: Restart haproxy + ansible.builtin.service: name: haproxy state: restarted -- name: restart keepalived - service: +- name: Restart keepalived + ansible.builtin.service: name: keepalived state: restarted diff --git a/roles/ceph-rgw-loadbalancer/meta/main.yml b/roles/ceph-rgw-loadbalancer/meta/main.yml index 24c34b1a34..624af347df 100644 --- a/roles/ceph-rgw-loadbalancer/meta/main.yml +++ b/roles/ceph-rgw-loadbalancer/meta/main.yml @@ -3,11 +3,11 @@ galaxy_info: author: Gui Hecheng description: Config HAProxy & Keepalived license: Apache - min_ansible_version: 2.8 + min_ansible_version: '2.8' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-rgw-loadbalancer/tasks/main.yml 
b/roles/ceph-rgw-loadbalancer/tasks/main.yml index 2229e778b6..53eb108fa9 100644 --- a/roles/ceph-rgw-loadbalancer/tasks/main.yml +++ b/roles/ceph-rgw-loadbalancer/tasks/main.yml @@ -1,6 +1,6 @@ --- -- name: include_tasks pre_requisite.yml - include_tasks: pre_requisite.yml +- name: Include_tasks pre_requisite.yml + ansible.builtin.include_tasks: pre_requisite.yml -- name: include_tasks start_rgw_loadbalancer.yml - include_tasks: start_rgw_loadbalancer.yml +- name: Include_tasks start_rgw_loadbalancer.yml + ansible.builtin.include_tasks: start_rgw_loadbalancer.yml diff --git a/roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml b/roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml index 45903c2205..37bced40a5 100644 --- a/roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml +++ b/roles/ceph-rgw-loadbalancer/tasks/pre_requisite.yml @@ -1,51 +1,49 @@ --- -- name: install haproxy and keepalived - package: +- name: Install haproxy and keepalived + ansible.builtin.package: name: ['haproxy', 'keepalived'] state: present register: result until: result is succeeded -- name: "generate haproxy configuration file: haproxy.cfg" - template: +- name: Generate haproxy configuration file haproxy.cfg + ansible.builtin.template: src: haproxy.cfg.j2 dest: /etc/haproxy/haproxy.cfg owner: "root" group: "root" mode: "0644" validate: "haproxy -f %s -c" - notify: - - restart haproxy + notify: Restart haproxy -- name: set_fact vip to vrrp_instance - set_fact: - vrrp_instances: "{{ vrrp_instances | default([]) | union([{ 'name': 'VI_' + index|string , 'vip': item, 'master': groups[rgwloadbalancer_group_name][index] }]) }}" +- name: Set_fact vip to vrrp_instance + ansible.builtin.set_fact: + vrrp_instances: "{{ vrrp_instances | default([]) | union([{'name': 'VI_' + index | string, 'vip': item, 'master': groups[rgwloadbalancer_group_name][index]}]) }}" loop: "{{ virtual_ips | flatten(levels=1) }}" loop_control: index_var: index -- name: "generate keepalived: configuration file: keepalived.conf" - template: +- name: Generate keepalived configuration file keepalived.conf + ansible.builtin.template: src: keepalived.conf.j2 dest: /etc/keepalived/keepalived.conf owner: "root" group: "root" mode: "0644" - notify: - - restart keepalived + notify: Restart keepalived -- name: selinux related tasks +- name: Selinux related tasks when: - ansible_facts['os_family'] == 'RedHat' - ansible_facts['selinux']['status'] == 'enabled' block: - - name: set_fact rgw_ports - set_fact: + - name: Set_fact rgw_ports + ansible.builtin.set_fact: rgw_ports: "{{ rgw_ports | default([]) | union(hostvars[item]['rgw_instances'] | map(attribute='radosgw_frontend_port') | map('string') | list) }}" with_items: "{{ groups.get(rgw_group_name, []) }}" - - name: add selinux rules - seport: + - name: Add selinux rules + community.general.seport: ports: "{{ rgw_ports }}" proto: tcp setype: http_port_t diff --git a/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml b/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml index 344fe195cf..218fc3cd68 100644 --- a/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml +++ b/roles/ceph-rgw-loadbalancer/tasks/start_rgw_loadbalancer.yml @@ -1,12 +1,12 @@ --- -- name: start haproxy - service: +- name: Start haproxy + ansible.builtin.service: name: haproxy state: started - enabled: yes + enabled: true -- name: start keepalived - service: +- name: Start keepalived + ansible.builtin.service: name: keepalived state: started - enabled: yes + enabled: true diff --git a/roles/ceph-rgw/defaults/main.yml 
b/roles/ceph-rgw/defaults/main.yml index e86f80ef20..ac1eed2f6e 100644 --- a/roles/ceph-rgw/defaults/main.yml +++ b/roles/ceph-rgw/defaults/main.yml @@ -37,30 +37,30 @@ copy_admin_key: false # If the key doesn't exist it falls back to the default replicated_rule. # This only works for replicated pool type not erasure. -#rgw_create_pools: -# "{{ rgw_zone }}.rgw.buckets.data": -# pg_num: 64 -# type: ec -# ec_profile: myecprofile -# ec_k: 5 -# ec_m: 3 -# "{{ rgw_zone }}.rgw.buckets.index": -# pg_num: 16 -# size: 3 -# type: replicated -# "{{ rgw_zone }}.rgw.meta": -# pg_num: 8 -# size: 3 -# type: replicated -# "{{ rgw_zone }}.rgw.log": -# pg_num: 8 -# size: 3 -# type: replicated -# "{{ rgw_zone }}.rgw.control": -# pg_num: 8 -# size: 3 -# type: replicated -# rule_name: foo +# rgw_create_pools: +# "{{ rgw_zone }}.rgw.buckets.data": +# pg_num: 64 +# type: ec +# ec_profile: myecprofile +# ec_k: 5 +# ec_m: 3 +# "{{ rgw_zone }}.rgw.buckets.index": +# pg_num: 16 +# size: 3 +# type: replicated +# "{{ rgw_zone }}.rgw.meta": +# pg_num: 8 +# size: 3 +# type: replicated +# "{{ rgw_zone }}.rgw.log": +# pg_num: 8 +# size: 3 +# type: replicated +# "{{ rgw_zone }}.rgw.control": +# pg_num: 8 +# size: 3 +# type: replicated +# rule_name: foo ########## @@ -73,8 +73,8 @@ copy_admin_key: false # These options can be passed using the 'ceph_rgw_docker_extra_env' variable. ceph_rgw_docker_memory_limit: "4096m" ceph_rgw_docker_cpu_limit: 8 -#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" -#ceph_rgw_docker_cpuset_mems: "0" +# ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16" +# ceph_rgw_docker_cpuset_mems: "0" ceph_rgw_docker_extra_env: ceph_config_keys: [] # DON'T TOUCH ME @@ -86,6 +86,6 @@ rgw_config_keys: "/" # DON'T TOUCH ME # ceph_rgw_systemd_overrides will override the systemd settings # for the ceph-rgw services. 
# For example,to set "PrivateDevices=false" you can specify: -#ceph_rgw_systemd_overrides: -# Service: -# PrivateDevices: False +# ceph_rgw_systemd_overrides: +# Service: +# PrivateDevices: false diff --git a/roles/ceph-rgw/handlers/main.yml b/roles/ceph-rgw/handlers/main.yml index 5d993b360e..a0344c7fa0 100644 --- a/roles/ceph-rgw/handlers/main.yml +++ b/roles/ceph-rgw/handlers/main.yml @@ -1,6 +1,6 @@ --- -- name: restart rgw - service: +- name: Restart rgw + ansible.builtin.service: name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" state: restarted with_items: "{{ rgw_instances }}" diff --git a/roles/ceph-rgw/meta/main.yml b/roles/ceph-rgw/meta/main.yml index 1a69cbece0..096ccda01b 100644 --- a/roles/ceph-rgw/meta/main.yml +++ b/roles/ceph-rgw/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Sébastien Han description: Installs Ceph Rados Gateway license: Apache - min_ansible_version: 2.7 + min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-rgw/tasks/common.yml b/roles/ceph-rgw/tasks/common.yml index e25fb4d244..3b2604d3c6 100644 --- a/roles/ceph-rgw/tasks/common.yml +++ b/roles/ceph-rgw/tasks/common.yml @@ -1,6 +1,6 @@ --- -- name: create rados gateway directories - file: +- name: Create rados gateway directories + ansible.builtin.file: path: "{{ item }}" state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -8,7 +8,7 @@ mode: "{{ ceph_directories_mode }}" with_items: "{{ rbd_client_admin_socket_path }}" -- name: get keys from monitors +- name: Get keys from monitors ceph_key: name: "{{ item.name }}" cluster: "{{ cluster }}" @@ -28,8 +28,8 @@ - item.copy_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: copy ceph key(s) if needed - copy: +- name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "{{ item.item.path }}" content: "{{ item.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -42,12 +42,12 @@ - item.item.copy_key | bool no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: copy SSL certificate & key data to certificate path - copy: +- name: Copy SSL certificate & key data to certificate path + ansible.builtin.copy: content: "{{ radosgw_frontend_ssl_certificate_data }}" dest: "{{ radosgw_frontend_ssl_certificate }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" - mode: 0440 + mode: "0440" when: radosgw_frontend_ssl_certificate | length > 0 and radosgw_frontend_ssl_certificate_data | length > 0 - notify: restart ceph rgws + notify: Restart ceph rgws diff --git a/roles/ceph-rgw/tasks/main.yml b/roles/ceph-rgw/tasks/main.yml index 7a44aabda0..e5214e6507 100644 --- a/roles/ceph-rgw/tasks/main.yml +++ b/roles/ceph-rgw/tasks/main.yml @@ -1,25 +1,25 @@ --- -- name: include common.yml - include_tasks: common.yml +- name: Include common.yml + ansible.builtin.include_tasks: common.yml -- name: include_tasks pre_requisite.yml - include_tasks: pre_requisite.yml +- name: Include_tasks pre_requisite.yml + ansible.builtin.include_tasks: pre_requisite.yml -- name: rgw pool creation tasks - include_tasks: rgw_create_pools.yml +- name: Rgw pool creation tasks + ansible.builtin.include_tasks: rgw_create_pools.yml run_once: true when: rgw_create_pools is defined -- name: include_tasks openstack-keystone.yml - include_tasks: openstack-keystone.yml +- name: Include_tasks openstack-keystone.yml 
+ ansible.builtin.include_tasks: openstack-keystone.yml when: radosgw_keystone_ssl | bool -- name: include_tasks start_radosgw.yml - include_tasks: start_radosgw.yml +- name: Include_tasks start_radosgw.yml + ansible.builtin.include_tasks: start_radosgw.yml when: - not containerized_deployment | bool -- name: include start_docker_rgw.yml - include_tasks: start_docker_rgw.yml +- name: Include start_docker_rgw.yml + ansible.builtin.include_tasks: start_docker_rgw.yml when: - containerized_deployment | bool diff --git a/roles/ceph-rgw/tasks/openstack-keystone.yml b/roles/ceph-rgw/tasks/openstack-keystone.yml index d216ee15e3..61fbf8888b 100644 --- a/roles/ceph-rgw/tasks/openstack-keystone.yml +++ b/roles/ceph-rgw/tasks/openstack-keystone.yml @@ -1,30 +1,30 @@ --- -- name: install nss-tools on redhat - package: +- name: Install nss-tools on redhat + ansible.builtin.package: name: nss-tools state: present register: result until: result is succeeded when: ansible_facts['pkg_mgr'] == 'yum' or ansible_facts['pkg_mgr'] == 'dnf' -- name: install libnss3-tools on debian - package: +- name: Install libnss3-tools on debian + ansible.builtin.package: name: libnss3-tools state: present register: result until: result is succeeded when: ansible_facts['pkg_mgr'] == 'apt' -- name: create nss directory for keystone certificates - file: +- name: Create nss directory for keystone certificates + ansible.builtin.file: path: "{{ radosgw_nss_db_path }}" state: directory owner: root group: root - mode: 0644 + mode: "0644" -- name: create nss entries for keystone certificates - shell: "{{ item }}" +- name: Create nss entries for keystone certificates + ansible.builtin.shell: "{{ item }}" changed_when: false with_items: - "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | certutil -d {{ radosgw_nss_db_path }} -A -n ca -t 'TCu,Cu,Tuw'" diff --git a/roles/ceph-rgw/tasks/pre_requisite.yml b/roles/ceph-rgw/tasks/pre_requisite.yml index 11a49f763f..8af02e15d5 100644 --- a/roles/ceph-rgw/tasks/pre_requisite.yml +++ b/roles/ceph-rgw/tasks/pre_requisite.yml @@ -1,9 +1,9 @@ --- -- name: set_fact _rgw_hostname - set_fact: +- name: Set_fact _rgw_hostname + ansible.builtin.set_fact: _rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}" -- name: set rgw parameter (log file) +- name: Set rgw parameter (log file) ceph_config: action: set who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}" @@ -15,7 +15,7 @@ delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}" -- name: set rgw parameter (rgw_frontends) +- name: Set rgw parameter (rgw_frontends) ceph_config: action: set who: "client.rgw.{{ _rgw_hostname + '.' 
+ item.instance_name }}" @@ -26,13 +26,13 @@ CEPH_CONTAINER_BINARY: "{{ container_binary }}" delegate_to: "{{ groups.get(mon_group_name, [])[0] }}" loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}" - notify: restart ceph rgws + notify: Restart ceph rgws # rgw_frontends # {{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }} -- name: create rados gateway directories - file: +- name: Create rados gateway directories + ansible.builtin.file: path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" state: directory owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" @@ -42,7 +42,7 @@ loop: "{{ rgw_instances }}" when: groups.get(mon_group_name, []) | length > 0 -- name: create rgw keyrings +- name: Create rgw keyrings ceph_key: name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" cluster: "{{ cluster }}" @@ -57,14 +57,14 @@ group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" mode: "0600" no_log: "{{ no_log_on_ceph_key_tasks }}" - delegate_to: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else 'localhost'}}" + delegate_to: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else 'localhost' }}" environment: CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" with_items: "{{ rgw_instances }}" when: cephx | bool -- name: get keys from monitors +- name: Get keys from monitors ceph_key: name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" cluster: "{{ cluster }}" @@ -81,8 +81,8 @@ - groups.get(mon_group_name, []) | length > 0 no_log: "{{ no_log_on_ceph_key_tasks }}" -- name: copy ceph key(s) if needed - copy: +- name: Copy ceph key(s) if needed + ansible.builtin.copy: dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.item.instance_name }}/keyring" content: "{{ item.stdout + '\n' }}" owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}" diff --git a/roles/ceph-rgw/tasks/rgw_create_pools.yml b/roles/ceph-rgw/tasks/rgw_create_pools.yml index bbdac1b4c7..3f6d1c2dd5 100644 --- a/roles/ceph-rgw/tasks/rgw_create_pools.yml +++ b/roles/ceph-rgw/tasks/rgw_create_pools.yml @@ -1,5 +1,5 @@ --- -- name: create ec profile +- name: Create ec profile ceph_ec_profile: name: "{{ item.value.ec_profile }}" cluster: "{{ cluster }}" @@ -15,7 +15,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" -- name: set crush rule +- name: Set crush rule ceph_crush_rule: name: "{{ item.key }}" cluster: "{{ cluster }}" @@ -30,7 +30,7 @@ - item.value.type is defined - item.value.type == 'ec' -- name: create ec pools for rgw +- name: Create ec pools for rgw ceph_pool: name: "{{ item.key }}" state: present @@ -52,7 +52,7 @@ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}" CEPH_CONTAINER_BINARY: "{{ container_binary }}" -- name: create replicated pools for rgw +- name: Create replicated pools for rgw ceph_pool: name: "{{ item.key }}" state: present diff --git 
a/roles/ceph-rgw/tasks/start_docker_rgw.yml b/roles/ceph-rgw/tasks/start_docker_rgw.yml index f4e3296ad1..cffeec811a 100644 --- a/roles/ceph-rgw/tasks/start_docker_rgw.yml +++ b/roles/ceph-rgw/tasks/start_docker_rgw.yml @@ -1,12 +1,12 @@ --- -- name: include_task systemd.yml - include_tasks: systemd.yml +- name: Include_task systemd.yml + ansible.builtin.include_tasks: systemd.yml -- name: systemd start rgw container - systemd: +- name: Systemd start rgw container + ansible.builtin.systemd: name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} state: started - enabled: yes - masked: no - daemon_reload: yes + enabled: true + masked: false + daemon_reload: true with_items: "{{ rgw_instances }}" diff --git a/roles/ceph-rgw/tasks/start_radosgw.yml b/roles/ceph-rgw/tasks/start_radosgw.yml index 4bc39cc376..805667548a 100644 --- a/roles/ceph-rgw/tasks/start_radosgw.yml +++ b/roles/ceph-rgw/tasks/start_radosgw.yml @@ -1,11 +1,12 @@ --- -- name: ensure systemd service override directory exists - file: +- name: Ensure systemd service override directory exists + ansible.builtin.file: state: directory path: "/etc/systemd/system/ceph-radosgw@.service.d/" + mode: "0750" when: ceph_rgw_systemd_overrides is defined -- name: add ceph-rgw systemd service overrides +- name: Add ceph-rgw systemd service overrides openstack.config_template.config_template: src: "ceph-rgw.service.d-overrides.j2" dest: "/etc/systemd/system/ceph-radosgw@.service.d/ceph-radosgw-systemd-overrides.conf" @@ -13,16 +14,16 @@ config_type: "ini" when: ceph_rgw_systemd_overrides is defined -- name: start rgw instance - service: +- name: Start rgw instance + ansible.builtin.systemd: name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }} state: started - enabled: yes - masked: no + enabled: true + masked: false with_items: "{{ rgw_instances }}" -- name: enable the ceph-radosgw.target service - systemd: +- name: Enable the ceph-radosgw.target service + ansible.builtin.systemd: name: ceph-radosgw.target - enabled: yes - masked: no + enabled: true + masked: false diff --git a/roles/ceph-rgw/tasks/systemd.yml b/roles/ceph-rgw/tasks/systemd.yml index 08457ec2fc..baca5af906 100644 --- a/roles/ceph-rgw/tasks/systemd.yml +++ b/roles/ceph-rgw/tasks/systemd.yml @@ -1,22 +1,23 @@ --- -- name: generate systemd unit file - template: +- name: Generate systemd unit file + ansible.builtin.template: src: "{{ role_path }}/templates/ceph-radosgw.service.j2" dest: /etc/systemd/system/ceph-radosgw@.service owner: "root" group: "root" mode: "0644" - notify: restart ceph rgws + notify: Restart ceph rgws -- name: generate systemd ceph-radosgw target file - copy: +- name: Generate systemd ceph-radosgw target file + ansible.builtin.copy: src: ceph-radosgw.target dest: /etc/systemd/system/ceph-radosgw.target + mode: "0644" when: containerized_deployment | bool -- name: enable ceph-radosgw.target - service: +- name: Enable ceph-radosgw.target + ansible.builtin.service: name: ceph-radosgw.target - enabled: yes - daemon_reload: yes - when: containerized_deployment | bool \ No newline at end of file + enabled: true + daemon_reload: true + when: containerized_deployment | bool diff --git a/roles/ceph-validate/meta/main.yml b/roles/ceph-validate/meta/main.yml index 1c6197efd3..01254db623 100644 --- a/roles/ceph-validate/meta/main.yml +++ b/roles/ceph-validate/meta/main.yml @@ -4,11 +4,11 @@ galaxy_info: author: Andrew Schoen description: Validates Ceph config options license: Apache - min_ansible_version: 2.7 + 
min_ansible_version: '2.7' platforms: - name: EL versions: - - 7 + - 'all' galaxy_tags: - system dependencies: [] diff --git a/roles/ceph-validate/tasks/check_devices.yml b/roles/ceph-validate/tasks/check_devices.yml index 78ee0f7176..a774add8a1 100644 --- a/roles/ceph-validate/tasks/check_devices.yml +++ b/roles/ceph-validate/tasks/check_devices.yml @@ -1,53 +1,53 @@ --- -- name: set_fact root_device - set_fact: +- name: Set_fact root_device + ansible.builtin.set_fact: root_device: "{{ ansible_facts['mounts'] | selectattr('mount', 'match', '^/$') | map(attribute='device') | first }}" -- name: lvm_volumes variable's tasks related +- name: Lvm_volumes variable's tasks related when: - lvm_volumes is defined - lvm_volumes | length > 0 block: - - name: resolve devices in lvm_volumes - command: "readlink -f {{ item.data }}" + - name: Resolve devices in lvm_volumes + ansible.builtin.command: "readlink -f {{ item.data }}" changed_when: false register: _lvm_volumes_data_devices with_items: "{{ lvm_volumes }}" when: item.data_vg is undefined - - name: set_fact lvm_volumes_data_devices - set_fact: + - name: Set_fact lvm_volumes_data_devices + ansible.builtin.set_fact: lvm_volumes_data_devices: "{{ lvm_volumes_data_devices | default([]) + [item.stdout] }}" with_items: "{{ _lvm_volumes_data_devices.results }}" when: item.skipped is undefined -- name: fail if root_device is passed in lvm_volumes or devices - fail: +- name: Fail if root_device is passed in lvm_volumes or devices + ansible.builtin.fail: msg: "{{ root_device }} found in either lvm_volumes or devices variable" when: root_device in lvm_volumes_data_devices | default([]) or root_device in devices | default([]) -- name: check devices are block devices +- name: Check devices are block devices block: - - name: get devices information - parted: + - name: Get devices information + community.general.parted: device: "{{ item }}" unit: MiB register: devices_parted - failed_when: False + failed_when: false with_items: - "{{ devices | default([]) }}" - "{{ dedicated_devices | default([]) }}" - "{{ bluestore_wal_devices | default([]) }}" - "{{ lvm_volumes_data_devices | default([]) }}" - - name: fail if one of the devices is not a device - fail: + - name: Fail if one of the devices is not a device + ansible.builtin.fail: msg: "{{ item.item }} is not a block special file!" when: item.rc is defined with_items: "{{ devices_parted.results }}" - - name: fail when gpt header found on osd devices - fail: + - name: Fail when gpt header found on osd devices + ansible.builtin.fail: msg: "{{ item.disk.dev }} has gpt header, please remove it." 
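Throughout this check_devices.yml hunk the same two lint rules apply: fqcn[action] moves modules that live outside ansible-core to their collection namespace (parted becomes community.general.parted), and Python-style booleans become YAML true/false. A minimal sketch of the resulting task shape, with a hypothetical device path that is not taken from the role:

- name: Get devices information (illustrative sketch)
  community.general.parted:
    device: /dev/sdb        # hypothetical device, for illustration only
    unit: MiB
  register: devices_parted
  failed_when: false        # lowercase YAML boolean instead of False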
with_items: "{{ devices_parted.results }}" when: @@ -55,11 +55,11 @@ - item.disk.table == 'gpt' - item.partitions | length == 0 -- name: check logical volume in lvm_volumes +- name: Check logical volume in lvm_volumes when: lvm_volumes is defined block: - - name: check data logical volume - stat: + - name: Check data logical volume + ansible.builtin.stat: path: "/dev/{{ item.data_vg }}/{{ item.data }}" follow: true register: lvm_volumes_data @@ -68,16 +68,16 @@ - item.data is defined - item.data_vg is defined - - name: fail if one of the data logical volume is not a device or doesn't exist - fail: + - name: Fail if one of the data logical volume is not a device or doesn't exist + ansible.builtin.fail: msg: "{{ item.item.data_vg }}/{{ item.item.data }} doesn't exist or isn't a block" loop: "{{ lvm_volumes_data.results }}" when: - item.skipped is undefined - not item.stat.exists | bool or not item.stat.isblk | bool - - name: check bluestore db logical volume - stat: + - name: Check bluestore db logical volume + ansible.builtin.stat: path: "/dev/{{ item.db_vg }}/{{ item.db }}" follow: true register: lvm_volumes_db @@ -87,16 +87,16 @@ - item.db is defined - item.db_vg is defined - - name: fail if one of the bluestore db logical volume is not a device or doesn't exist - fail: + - name: Fail if one of the bluestore db logical volume is not a device or doesn't exist + ansible.builtin.fail: msg: "{{ item.item.db_vg }}/{{ item.item.db }} doesn't exist or isn't a block" loop: "{{ lvm_volumes_db.results }}" when: - item.skipped is undefined - not item.stat.exists | bool or not item.stat.isblk | bool - - name: check bluestore wal logical volume - stat: + - name: Check bluestore wal logical volume + ansible.builtin.stat: path: "/dev/{{ item.wal_vg }}/{{ item.wal }}" follow: true register: lvm_volumes_wal @@ -106,11 +106,10 @@ - item.wal is defined - item.wal_vg is defined - - name: fail if one of the bluestore wal logical volume is not a device or doesn't exist - fail: + - name: Fail if one of the bluestore wal logical volume is not a device or doesn't exist + ansible.builtin.fail: msg: "{{ item.item.wal_vg }}/{{ item.item.wal }} doesn't exist or isn't a block" loop: "{{ lvm_volumes_wal.results }}" when: - item.skipped is undefined - not item.stat.exists | bool or not item.stat.isblk | bool - diff --git a/roles/ceph-validate/tasks/check_eth_mon.yml b/roles/ceph-validate/tasks/check_eth_mon.yml index 17251127ee..21a58f3693 100644 --- a/roles/ceph-validate/tasks/check_eth_mon.yml +++ b/roles/ceph-validate/tasks/check_eth_mon.yml @@ -1,23 +1,23 @@ --- -- name: "fail if {{ monitor_interface }} does not exist on {{ inventory_hostname }}" - fail: +- name: Check if network interface exists + ansible.builtin.fail: msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}" when: monitor_interface not in ansible_facts['interfaces'] -- name: "fail if {{ monitor_interface }} is not active on {{ inventory_hostname }}" - fail: +- name: Check if network interface is active + ansible.builtin.fail: msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}" when: not hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['active'] -- name: "fail if {{ monitor_interface }} does not have any ip v4 address on {{ inventory_hostname }}" - fail: +- name: Check if network interface has an IPv4 address + ansible.builtin.fail: msg: "{{ monitor_interface }} does not have any IPv4 address on {{ inventory_hostname }}" when: - ip_version == "ipv4" - 
hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv4'] is not defined -- name: "fail if {{ monitor_interface }} does not have any ip v6 address on {{ inventory_hostname }}" - fail: +- name: Check if network interface has an IPv6 address + ansible.builtin.fail: msg: "{{ monitor_interface }} does not have any IPv6 address on {{ inventory_hostname }}" when: - ip_version == "ipv6" diff --git a/roles/ceph-validate/tasks/check_eth_rgw.yml b/roles/ceph-validate/tasks/check_eth_rgw.yml index c2438cf3dd..8ad337470e 100644 --- a/roles/ceph-validate/tasks/check_eth_rgw.yml +++ b/roles/ceph-validate/tasks/check_eth_rgw.yml @@ -1,23 +1,23 @@ --- -- name: "fail if {{ radosgw_interface }} does not exist on {{ inventory_hostname }}" - fail: +- name: Check if network interface exists + ansible.builtin.fail: msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}" when: radosgw_interface not in ansible_facts['interfaces'] -- name: "fail if {{ radosgw_interface }} is not active on {{ inventory_hostname }}" - fail: +- name: Check if network interface is active + ansible.builtin.fail: msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}" when: hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['active'] == "false" -- name: "fail if {{ radosgw_interface }} does not have any ip v4 address on {{ inventory_hostname }}" - fail: +- name: Check if network interface has an IPv4 address + ansible.builtin.fail: msg: "{{ radosgw_interface }} does not have any IPv4 address on {{ inventory_hostname }}" when: - ip_version == "ipv4" - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv4'] is not defined -- name: "fail if {{ radosgw_interface }} does not have any ip v6 address on {{ inventory_hostname }}" - fail: +- name: Check if network interface has an IPv6 address + ansible.builtin.fail: msg: "{{ radosgw_interface }} does not have any IPv6 address on {{ inventory_hostname }}" when: - ip_version == "ipv6" diff --git a/roles/ceph-validate/tasks/check_ipaddr_mon.yml b/roles/ceph-validate/tasks/check_ipaddr_mon.yml index 734cd69700..9183b385bf 100644 --- a/roles/ceph-validate/tasks/check_ipaddr_mon.yml +++ b/roles/ceph-validate/tasks/check_ipaddr_mon.yml @@ -1,5 +1,5 @@ --- -- name: "fail if {{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}" - fail: +- name: Check if network interface has an IP address in `monitor_address_block` + ansible.builtin.fail: msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}" when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0 diff --git a/roles/ceph-validate/tasks/check_iscsi.yml b/roles/ceph-validate/tasks/check_iscsi.yml index 3968caf94f..25673ebcf2 100644 --- a/roles/ceph-validate/tasks/check_iscsi.yml +++ b/roles/ceph-validate/tasks/check_iscsi.yml @@ -1,37 +1,37 @@ --- -- name: fail on unsupported distribution for iscsi gateways - fail: +- name: Fail on unsupported distribution for iscsi gateways + ansible.builtin.fail: msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora" when: ansible_facts['distribution'] not in ['RedHat', 'CentOS', 'Fedora', 'AlmaLinux', 'Rocky'] -- name: make sure gateway_ip_list is configured - fail: +- name: Make sure gateway_ip_list is configured + 
ansible.builtin.fail: msg: "you must set a list of IPs (comma separated) for gateway_ip_list" when: - gateway_ip_list == '0.0.0.0' - not containerized_deployment | bool - not use_new_ceph_iscsi | bool -- name: make sure gateway_iqn is configured - fail: +- name: Make sure gateway_iqn is configured + ansible.builtin.fail: msg: "you must set a iqn for the iSCSI target" when: - gateway_iqn | length == 0 - not containerized_deployment | bool - not use_new_ceph_iscsi | bool -- name: fail if unsupported chap configuration - fail: +- name: Fail if unsupported chap configuration + ansible.builtin.fail: msg: "Mixing clients with CHAP enabled and disabled is not supported." - with_items: "{{ client_connections }}" + with_items: "{{ client_connections }}" when: - item.status is defined - item.status == "present" - item.chap - " '' in client_connections | selectattr('status', 'match', 'present') | map(attribute='chap') | list" -- name: fail on unsupported distribution version for iscsi gateways - command: "grep -q {{ item }}=m {% if is_atomic|bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_facts['kernel'] }}" +- name: Fail on unsupported distribution version for iscsi gateways + ansible.builtin.command: "grep -q {{ item }}=m {% if is_atomic | bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_facts['kernel'] }}" register: iscsi_kernel changed_when: false failed_when: iscsi_kernel.rc != 0 diff --git a/roles/ceph-validate/tasks/check_nfs.yml b/roles/ceph-validate/tasks/check_nfs.yml index 91ef3a2007..2c26aa4bef 100644 --- a/roles/ceph-validate/tasks/check_nfs.yml +++ b/roles/ceph-validate/tasks/check_nfs.yml @@ -1,14 +1,14 @@ --- -- name: fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone) - fail: +- name: Fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone) + ansible.builtin.fail: msg: "ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key must be set if nfs_obj_gw is True" when: - nfs_obj_gw | bool - groups.get(mon_group_name, []) | length == 0 - (ceph_nfs_rgw_access_key is undefined or ceph_nfs_rgw_secret_key is undefined) -- name: fail on openSUSE Leap 15.x using distro packages - fail: +- name: Fail on openSUSE Leap 15.x using distro packages + ansible.builtin.fail: msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')" when: - ceph_origin == 'distro' diff --git a/roles/ceph-validate/tasks/check_pools.yml b/roles/ceph-validate/tasks/check_pools.yml index d4441fda04..bca9d26ff4 100644 --- a/roles/ceph-validate/tasks/check_pools.yml +++ b/roles/ceph-validate/tasks/check_pools.yml @@ -1,6 +1,6 @@ --- -- name: fail if target_size_ratio is not set when pg_autoscale_mode is True - fail: +- name: Fail if target_size_ratio is not set when pg_autoscale_mode is True + ansible.builtin.fail: msg: "You must set a target_size_ratio value on following pool: {{ item.name }}." 
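Converting bare shell/command calls to ansible.builtin.command is paired here with changed_when: false, which satisfies the linter's no-changed-when rule for tasks that only probe state. A generic sketch, assuming a made-up kernel option name:

- name: Probe a kernel config option (illustrative sketch)
  ansible.builtin.command: grep -q EXAMPLE_OPTION=m /boot/config-{{ ansible_facts['kernel'] }}
  register: probe
  changed_when: false       # read-only check, never reports a change
  failed_when: probe.rc != 0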
with_items: - "{{ openstack_pools | default([]) }}" diff --git a/roles/ceph-validate/tasks/check_rbdmirror.yml b/roles/ceph-validate/tasks/check_rbdmirror.yml index 3cfa21f9ce..59ac8ead3d 100644 --- a/roles/ceph-validate/tasks/check_rbdmirror.yml +++ b/roles/ceph-validate/tasks/check_rbdmirror.yml @@ -1,12 +1,12 @@ --- -- name: ensure ceph_rbd_mirror_pool is set - fail: +- name: Ensure ceph_rbd_mirror_pool is set + ansible.builtin.fail: msg: "ceph_rbd_mirror_pool needs to be provided" when: ceph_rbd_mirror_pool | default("") | length == 0 -- name: ensure ceph_rbd_mirror_remote_cluster is set - fail: +- name: Ensure ceph_rbd_mirror_remote_cluster is set + ansible.builtin.fail: msg: "ceph_rbd_mirror_remote_cluster needs to be provided" when: - ceph_rbd_mirror_remote_cluster | default("") | length == 0 - - ceph_rbd_mirror_remote_user | default("") | length > 0 \ No newline at end of file + - ceph_rbd_mirror_remote_user | default("") | length > 0 diff --git a/roles/ceph-validate/tasks/check_repository.yml b/roles/ceph-validate/tasks/check_repository.yml index 89d74ffa92..6e556fb15a 100644 --- a/roles/ceph-validate/tasks/check_repository.yml +++ b/roles/ceph-validate/tasks/check_repository.yml @@ -1,17 +1,17 @@ -- name: validate ceph_origin - fail: +- name: Validate ceph_origin + ansible.builtin.fail: msg: "ceph_origin must be either 'repository', 'distro' or 'local'" when: ceph_origin not in ['repository', 'distro', 'local'] -- name: validate ceph_repository - fail: +- name: Validate ceph_repository + ansible.builtin.fail: msg: "ceph_repository must be either 'community', 'rhcs', 'obs', 'dev', 'custom' or 'uca'" when: - ceph_origin == 'repository' - ceph_repository not in ['community', 'rhcs', 'obs', 'dev', 'custom', 'uca'] -- name: validate ceph_repository_community - fail: +- name: Validate ceph_repository_community + ansible.builtin.fail: msg: "ceph_stable_release must be 'reef'" when: - ceph_origin == 'repository' diff --git a/roles/ceph-validate/tasks/check_rgw_multisite.yml b/roles/ceph-validate/tasks/check_rgw_multisite.yml index 6693f00b32..7679cd86ff 100644 --- a/roles/ceph-validate/tasks/check_rgw_multisite.yml +++ b/roles/ceph-validate/tasks/check_rgw_multisite.yml @@ -1,66 +1,65 @@ --- -- name: fail if rgw_zone is default - fail: +- name: Fail if rgw_zone is default + ansible.builtin.fail: msg: "rgw_zone cannot be named 'default'" loop: "{{ rgw_instances }}" when: item.rgw_zone is undefined or item.rgw_zone == 'default' -- name: fail if either rgw_zonemaster or rgw_zonesecondary is undefined - fail: +- name: Fail if either rgw_zonemaster or rgw_zonesecondary is undefined + ansible.builtin.fail: msg: "rgw_zonemaster and rgw_zonesecondary must be defined" loop: "{{ rgw_instances }}" when: item.rgw_zonemaster | default(rgw_zonemaster) is undefined or item.rgw_zonesecondary | default(rgw_zonesecondary) is undefined -- name: fail if rgw_zonemaster and rgw_zonesecondary are both true - fail: +- name: Fail if rgw_zonemaster and rgw_zonesecondary are both true + ansible.builtin.fail: msg: "rgw_zonemaster and rgw_zonesecondary cannot both be true" loop: "{{ rgw_instances }}" when: - item.rgw_zonemaster | default(rgw_zonemaster) | bool - item.rgw_zonesecondary | default(rgw_zonesecondary) | bool -- name: fail if rgw_zonegroup is not set - fail: +- name: Fail if rgw_zonegroup is not set + ansible.builtin.fail: msg: "rgw_zonegroup has not been set by the user" loop: "{{ rgw_instances }}" when: item.rgw_zonegroup is undefined -- name: fail if rgw_zone_user is not set - fail: +- name: Fail if 
rgw_zone_user is not set + ansible.builtin.fail: msg: "rgw_zone_user has not been set by the user" loop: "{{ rgw_instances }}" when: item.rgw_zone_user is undefined -- name: fail if rgw_zone_user_display_name is not set - fail: +- name: Fail if rgw_zone_user_display_name is not set + ansible.builtin.fail: msg: "rgw_zone_user_display_name has not been set by the user" loop: "{{ rgw_instances }}" when: item.rgw_zone_user_display_name is undefined -- name: fail if rgw_realm is not set - fail: +- name: Fail if rgw_realm is not set + ansible.builtin.fail: msg: "rgw_realm has not been set by the user" loop: "{{ rgw_instances }}" when: item.rgw_realm is undefined -- name: fail if system_access_key is not set - fail: +- name: Fail if system_access_key is not set + ansible.builtin.fail: msg: "system_access_key has not been set by the user" loop: "{{ rgw_instances }}" when: item.system_access_key is undefined -- name: fail if system_secret_key is not set - fail: +- name: Fail if system_secret_key is not set + ansible.builtin.fail: msg: "system_secret_key has not been set by the user" loop: "{{ rgw_instances }}" when: item.system_secret_key is undefined -- name: fail if endpoint is not set - fail: +- name: Fail if endpoint is not set + ansible.builtin.fail: msg: "endpoint has not been set by the user" loop: "{{ rgw_instances }}" when: - item.rgw_zonesecondary | default(rgw_zonesecondary) | bool - rgw_pull_port is undefined and rgw_pullhost is undefined and item.rgw_pull_proto | default(rgw_pull_proto) is undefined - item.endpoint is undefined - diff --git a/roles/ceph-validate/tasks/check_rgw_pools.yml b/roles/ceph-validate/tasks/check_rgw_pools.yml index a4f5412c7e..ae30eec6d1 100644 --- a/roles/ceph-validate/tasks/check_rgw_pools.yml +++ b/roles/ceph-validate/tasks/check_rgw_pools.yml @@ -1,6 +1,6 @@ --- -- name: fail if ec_profile is not set for ec pools - fail: +- name: Fail if ec_profile is not set for ec pools + ansible.builtin.fail: msg: "ec_profile must be set for ec pools" loop: "{{ rgw_create_pools | dict2items }}" when: @@ -8,8 +8,8 @@ - item.value.type == 'ec' - item.value.ec_profile is undefined -- name: fail if ec_k is not set for ec pools - fail: +- name: Fail if ec_k is not set for ec pools + ansible.builtin.fail: msg: "ec_k must be set for ec pools" loop: "{{ rgw_create_pools | dict2items }}" when: @@ -17,8 +17,8 @@ - item.value.type == 'ec' - item.value.ec_k is undefined -- name: fail if ec_m is not set for ec pools - fail: +- name: Fail if ec_m is not set for ec pools + ansible.builtin.fail: msg: "ec_m must be set for ec pools" loop: "{{ rgw_create_pools | dict2items }}" when: diff --git a/roles/ceph-validate/tasks/check_system.yml b/roles/ceph-validate/tasks/check_system.yml index 0f19d2cdc6..da6af3e892 100644 --- a/roles/ceph-validate/tasks/check_system.yml +++ b/roles/ceph-validate/tasks/check_system.yml @@ -1,31 +1,31 @@ --- -- name: fail on unsupported ansible version (1.X) - fail: +- name: Fail on unsupported ansible version (1.X) + ansible.builtin.fail: msg: "Ansible version must be >= 2.x, please update!" when: ansible_version.major|int < 2 -- name: fail on unsupported ansible version - fail: +- name: Fail on unsupported ansible version + ansible.builtin.fail: msg: "Ansible version must be either 2.15 or 2.16!" 
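Each multisite validation above follows one shape: loop over rgw_instances and fail on the first instance missing a required key. A condensed sketch of that shape, with an illustrative key name:

- name: Fail if a required multisite key is missing (illustrative sketch)
  ansible.builtin.fail:
    msg: "rgw_example_key has not been set by the user"   # hypothetical key
  loop: "{{ rgw_instances }}"
  when: item.rgw_example_key is undefined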
when: ansible_version.minor|int not in [15, 16] -- name: fail on unsupported system - fail: +- name: Fail on unsupported system + ansible.builtin.fail: msg: "System not supported {{ ansible_facts['system'] }}" when: ansible_facts['system'] not in ['Linux'] -- name: fail on unsupported architecture - fail: +- name: Fail on unsupported architecture + ansible.builtin.fail: msg: "Architecture not supported {{ ansible_facts['architecture'] }}" when: ansible_facts['architecture'] not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64'] -- name: fail on unsupported distribution - fail: +- name: Fail on unsupported distribution + ansible.builtin.fail: msg: "Distribution not supported {{ ansible_facts['os_family'] }}" when: ansible_facts['os_family'] not in ['Debian', 'RedHat', 'ClearLinux', 'Suse'] -- name: fail on unsupported CentOS release - fail: +- name: Fail on unsupported CentOS release + ansible.builtin.fail: msg: "CentOS release {{ ansible_facts['distribution_major_version'] }} not supported with dashboard" when: - ansible_facts['distribution'] == 'CentOS' @@ -33,31 +33,31 @@ - not containerized_deployment | bool - dashboard_enabled | bool -- name: red hat based systems tasks +- name: Red hat based systems tasks when: - ceph_repository == 'rhcs' - ansible_facts['distribution'] == 'RedHat' block: - - name: fail on unsupported distribution for red hat ceph storage - fail: + - name: Fail on unsupported distribution for red hat ceph storage + ansible.builtin.fail: msg: "Distribution not supported {{ ansible_facts['distribution_version'] }} by Red Hat Ceph Storage, only RHEL >= 8.2" when: ansible_facts['distribution_version'] is version('8.2', '<') -- name: fail on unsupported distribution for ubuntu cloud archive - fail: +- name: Fail on unsupported distribution for ubuntu cloud archive + ansible.builtin.fail: msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_facts['distribution'] }}" when: - ceph_repository == 'uca' - ansible_facts['distribution'] != 'Ubuntu' -- name: "fail on unsupported SUSE/openSUSE distribution (only 15.x supported)" - fail: +- name: Fail on unsupported SUSE/openSUSE distribution (only 15.x supported) + ansible.builtin.fail: msg: "Distribution not supported: {{ ansible_facts['distribution'] }} {{ ansible_facts['distribution_major_version'] }}" when: - ansible_facts['distribution'] == 'openSUSE Leap' or ansible_facts['distribution'] == 'SUSE' - ansible_facts['distribution_major_version'] != '15' -- name: fail if systemd is not present - fail: +- name: Fail if systemd is not present + ansible.builtin.fail: msg: "Systemd must be present" when: ansible_facts['service_mgr'] != 'systemd' diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml index 8014406695..8b4ad48b54 100644 --- a/roles/ceph-validate/tasks/main.yml +++ b/roles/ceph-validate/tasks/main.yml @@ -1,18 +1,18 @@ --- -- name: include check_system.yml - include_tasks: check_system.yml +- name: Include check_system.yml + ansible.builtin.include_tasks: check_system.yml -- name: validate repository variables in non-containerized scenario - include_tasks: check_repository.yml +- name: Validate repository variables in non-containerized scenario + ansible.builtin.include_tasks: check_repository.yml when: not containerized_deployment | bool -- name: validate osd_objectstore - fail: +- name: Validate osd_objectstore + ansible.builtin.fail: msg: "osd_objectstore must be 'bluestore''" when: osd_objectstore not in ['bluestore'] -- name: validate monitor network configuration - fail: +- 
name: Validate monitor network configuration + ansible.builtin.fail: msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided" when: - mon_group_name in group_names @@ -20,8 +20,8 @@ - monitor_address_block == 'subnet' - monitor_interface == 'interface' -- name: validate radosgw network configuration - fail: +- name: Validate radosgw network configuration + ansible.builtin.fail: msg: "Either radosgw_address, radosgw_address_block or radosgw_interface must be provided" when: - rgw_group_name in group_names @@ -29,19 +29,19 @@ - radosgw_address_block == 'subnet' - radosgw_interface == 'interface' -- name: validate osd nodes +- name: Validate osd nodes when: osd_group_name in group_names block: - - name: validate lvm osd scenario - fail: + - name: Validate lvm osd scenario + ansible.builtin.fail: msg: 'devices or lvm_volumes must be defined for lvm osd scenario' when: - not osd_auto_discovery | default(false) | bool - devices is undefined - lvm_volumes is undefined - - name: validate bluestore lvm osd scenario - fail: + - name: Validate bluestore lvm osd scenario + ansible.builtin.fail: msg: 'data key must be defined in lvm_volumes' when: - osd_objectstore == 'bluestore' @@ -51,16 +51,16 @@ - item.data is undefined with_items: '{{ lvm_volumes }}' -- name: debian based systems tasks +- name: Debian based systems tasks when: ansible_facts['os_family'] == 'Debian' block: - - name: fail if local scenario is enabled on debian - fail: + - name: Fail if local scenario is enabled on debian + ansible.builtin.fail: msg: "'local' installation scenario not supported on Debian systems" when: ceph_origin == 'local' - - name: fail if rhcs repository is enabled on debian - fail: + - name: Fail if rhcs repository is enabled on debian + ansible.builtin.fail: msg: "RHCS isn't supported anymore on Debian distribution" when: - ceph_origin == 'repository' @@ -73,120 +73,121 @@ when: ansible_facts['os_family'] == 'Suse' block: - name: Check ceph_origin definition on SUSE/openSUSE Leap - fail: + ansible.builtin.fail: msg: "Unsupported installation method origin:{{ ceph_origin }}" when: ceph_origin not in ['distro', 'repository'] - name: Check ceph_repository definition on SUSE/openSUSE Leap - fail: + ansible.builtin.fail: msg: "Unsupported installation method origin:{{ ceph_origin }} repo:{{ ceph_repository }}' only valid combination is ceph_origin == 'repository' and ceph_repository == 'obs'" when: - ceph_origin == 'repository' - ceph_repository != 'obs' -- name: validate ntp daemon type - fail: +- name: Validate ntp daemon type + ansible.builtin.fail: msg: "ntp_daemon_type must be one of chronyd, ntpd, or timesyncd" when: - ntp_service_enabled | bool - ntp_daemon_type not in ['chronyd', 'ntpd', 'timesyncd'] # Since NTPd can not be installed on Atomic... 
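Most of main.yml is driven by two rules: name[casing], which wants task names to start with an uppercase letter, and fqcn[action-core], which wants builtin actions spelled with their ansible.builtin. prefix. Applied to one of the includes above, the before and after look roughly like:

# before
- name: include check_system.yml
  include_tasks: check_system.yml

# after
- name: Include check_system.yml
  ansible.builtin.include_tasks: check_system.yml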
-- name: abort if ntp_daemon_type is ntpd on Atomic - fail: +- name: Abort if ntp_daemon_type is ntpd on Atomic + ansible.builtin.fail: msg: installation can't happen on Atomic and ntpd needs to be installed when: - is_atomic | default(False) | bool - ansible_facts['os_family'] == 'RedHat' - ntp_daemon_type == 'ntpd' -- name: include check_devices.yml - include_tasks: check_devices.yml +- name: Include check_devices.yml + ansible.builtin.include_tasks: check_devices.yml when: - osd_group_name in group_names - not osd_auto_discovery | default(False) | bool -- name: include check_eth_mon.yml - include_tasks: check_eth_mon.yml +- name: Include check_eth_mon.yml + ansible.builtin.include_tasks: check_eth_mon.yml when: - mon_group_name in group_names - monitor_interface != "dummy" - monitor_address == "x.x.x.x" - monitor_address_block == "subnet" -- name: include check_ipaddr_mon.yml - include_tasks: check_ipaddr_mon.yml +- name: Include check_ipaddr_mon.yml + ansible.builtin.include_tasks: check_ipaddr_mon.yml when: - mon_group_name in group_names - monitor_interface == "interface" - monitor_address == "x.x.x.x" - monitor_address_block != "subnet" -- name: include check_eth_rgw.yml - include_tasks: check_eth_rgw.yml +- name: Include check_eth_rgw.yml + ansible.builtin.include_tasks: check_eth_rgw.yml when: - rgw_group_name in group_names - radosgw_interface != "dummy" - radosgw_address == "x.x.x.x" - radosgw_address_block == "subnet" -- name: include check_rgw_pools.yml - include_tasks: check_rgw_pools.yml +- name: Include check_rgw_pools.yml + ansible.builtin.include_tasks: check_rgw_pools.yml when: - inventory_hostname in groups.get(rgw_group_name, []) - rgw_create_pools is defined -- name: include check_iscsi.yml - include_tasks: check_iscsi.yml +- name: Include check_iscsi.yml + ansible.builtin.include_tasks: check_iscsi.yml when: iscsi_gw_group_name in group_names -- name: include check_nfs.yml - include_tasks: check_nfs.yml +- name: Include check_nfs.yml + ansible.builtin.include_tasks: check_nfs.yml when: inventory_hostname in groups.get(nfs_group_name, []) -- name: include check_rbdmirror.yml - include_tasks: check_rbdmirror.yml +- name: Include check_rbdmirror.yml + ansible.builtin.include_tasks: check_rbdmirror.yml when: - rbdmirror_group_name in group_names - ceph_rbd_mirror_configure | default(false) | bool -- block: - - name: fail if monitoring group doesn't exist - fail: +- name: Monitoring related tasks + when: dashboard_enabled | bool + block: + - name: Fail if monitoring group doesn't exist + ansible.builtin.fail: msg: "you must add a monitoring group and add at least one node." when: groups[monitoring_group_name] is undefined - - name: fail when monitoring doesn't contain at least one node. - fail: + - name: Fail when monitoring doesn't contain at least one node. + ansible.builtin.fail: msg: "you must add at least one node in the monitoring hosts group" when: groups[monitoring_group_name] | length < 1 - - name: fail when dashboard_admin_password and/or grafana_admin_password are not set - fail: + - name: Fail when dashboard_admin_password and/or grafana_admin_password are not set + ansible.builtin.fail: msg: "you must set dashboard_admin_password and grafana_admin_password." 
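The previously anonymous block gains an explicit name and its guard condition is hoisted above the block, so every task and block is named and the key order matches the linter's expectations; the tasks inside are unchanged. In outline, with the remaining tasks elided:

- name: Monitoring related tasks
  when: dashboard_enabled | bool
  block:
    - name: Fail if monitoring group doesn't exist
      ansible.builtin.fail:
        msg: "you must add a monitoring group and add at least one node."
      when: groups[monitoring_group_name] is undefined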
when: - dashboard_admin_password is undefined or grafana_admin_password is undefined - when: dashboard_enabled | bool -- name: validate container registry credentials - fail: +- name: Validate container registry credentials + ansible.builtin.fail: msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set' when: - ceph_docker_registry_auth | bool - (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or (ceph_docker_registry_username | string | length == 0 or ceph_docker_registry_password | string | length == 0) -- name: validate container service and container package - fail: +- name: Validate container service and container package + ansible.builtin.fail: msg: 'both container_package_name and container_service_name should be defined' when: - (container_package_name is undefined and container_service_name is defined) or (container_package_name is defined and container_service_name is undefined) -- name: validate openstack_keys key format - fail: +- name: Validate openstack_keys key format + ansible.builtin.fail: msg: '{{ item.name }} key format invalid' with_items: '{{ openstack_keys }}' when: @@ -196,8 +197,8 @@ - item.key is defined - item.key is not match("^[a-zA-Z0-9+/]{38}==$") -- name: validate clients keys key format - fail: +- name: Validate clients keys key format + ansible.builtin.fail: msg: '{{ item.name }} key format invalid' with_items: '{{ keys }}' when: @@ -207,8 +208,8 @@ - item.key is defined - item.key is not match("^[a-zA-Z0-9+/]{38}==$") -- name: validate openstack_keys caps - fail: +- name: Validate openstack_keys caps + ansible.builtin.fail: msg: '{{ item.name }} key has no caps defined' with_items: '{{ openstack_keys }}' when: @@ -217,8 +218,8 @@ - openstack_keys | length > 0 - item.caps is not defined -- name: validate clients keys caps - fail: +- name: Validate clients keys caps + ansible.builtin.fail: msg: '{{ item.name }} key has no caps defined' with_items: '{{ keys }}' when: @@ -227,16 +228,16 @@ - keys | length > 0 - item.caps is not defined -- name: check virtual_ips is defined - fail: +- name: Check virtual_ips is defined + ansible.builtin.fail: msg: "virtual_ips is not defined." when: - rgwloadbalancer_group_name in group_names - groups[rgwloadbalancer_group_name] | length > 0 - virtual_ips is not defined -- name: validate virtual_ips length - fail: +- name: Validate virtual_ips length + ansible.builtin.fail: msg: "There are more virual_ips defined than rgwloadbalancer nodes" when: - rgwloadbalancer_group_name in group_names
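Two other normalizations recur across the rgw and validate roles above: yes/no flags become true/false, and octal file modes are quoted so YAML cannot reinterpret them as integers. A representative sketch, with illustrative unit and path names:

- name: Ensure a systemd override directory exists (illustrative sketch)
  ansible.builtin.file:
    path: /etc/systemd/system/example.service.d/   # hypothetical path
    state: directory
    mode: "0750"            # quoted octal mode instead of 0750

- name: Enable a systemd target (illustrative sketch)
  ansible.builtin.systemd:
    name: example.target    # hypothetical unit
    enabled: true           # true/false instead of yes/no
    masked: false
    daemon_reload: true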