diff --git a/Vagrantfile b/Vagrantfile
index dde4b30893..1824116497 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -25,8 +25,8 @@ GRAFANA = settings['grafana_server_vms']
 NRBD_MIRRORS = settings['rbd_mirror_vms']
 CLIENTS = settings['client_vms']
 MGRS = settings['mgr_vms']
-PUBLIC_SUBNET = settings['public_subnet']
-CLUSTER_SUBNET = settings['cluster_subnet']
+PUBLIC_SUBNET = ENV['CEPH_PUBLIC_SUBNET'] || settings['public_subnet']
+CLUSTER_SUBNET = ENV['CEPH_CLUSTER_SUBNET'] || settings['cluster_subnet']
 BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['vagrant_box']
 CLIENT_BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['client_vagrant_box'] || BOX
 BOX_URL = ENV['CEPH_ANSIBLE_VAGRANT_BOX_URL'] || settings['vagrant_box_url']
@@ -88,7 +88,6 @@ ansible_provision = proc do |ansible|
   if DOCKER then
     ansible.extra_vars = ansible.extra_vars.merge({
       containerized_deployment: 'true',
-      monitor_interface: ETH,
       ceph_mon_docker_subnet: ansible.extra_vars[:public_network],
       devices: settings['disks'],
      radosgw_interface: ETH,
@@ -97,7 +96,6 @@ ansible_provision = proc do |ansible|
   else
     ansible.extra_vars = ansible.extra_vars.merge({
       devices: settings['disks'],
-      monitor_interface: ETH,
       radosgw_interface: ETH,
       os_tuning_params: settings['os_tuning_params'],
     })
@@ -105,8 +103,6 @@ ansible_provision = proc do |ansible|
   if BOX == 'linode' then
     ansible.sudo = true
-    # Use monitor_address_block instead of monitor_interface:
-    ansible.extra_vars.delete(:monitor_interface)
     # Use radosgw_address_block instead of radosgw_interface:
     ansible.extra_vars.delete(:radosgw_interface)
     ansible.extra_vars = ansible.extra_vars.merge({
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 1d20060d6d..71b2ce8603 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -228,7 +228,6 @@ An example configuration that deploys the upstream ``octopus`` version of Ceph w
    ceph_repository: community
    public_network: "192.168.3.0/24"
    cluster_network: "192.168.4.0/24"
-   monitor_interface: eth1
    devices:
      - '/dev/sda'
      - '/dev/sdb'
@@ -238,7 +237,6 @@ selection or other aspects of your cluster.
 
 - ``ceph_origin``
 - ``public_network``
-- ``monitor_interface`` or ``monitor_address``
 
 When deploying RGW instance(s) you are required to set the ``radosgw_interface`` or ``radosgw_address`` config option.
diff --git a/docs/source/testing/scenarios.rst b/docs/source/testing/scenarios.rst
index 185f994633..c05d91899a 100644
--- a/docs/source/testing/scenarios.rst
+++ b/docs/source/testing/scenarios.rst
@@ -130,15 +130,15 @@ way so that a vagrant environment can be isolated to the given scenario.
 The ``hosts`` file should contain the hosts needed for the scenario. This
 might seem a bit repetitive since machines are already defined in
 :ref:`vagrant_variables` but it allows granular changes to hosts (for example
-defining an interface vs. an IP on a monitor) which can help catch issues in
+defining a different ``public_network`` value per monitor) which can help catch issues in
 ``ceph-ansible`` configuration. For example:
 
 .. code-block:: ini
 
     [mons]
-    mon0 monitor_address=192.168.5.10
-    mon1 monitor_address=192.168.5.11
-    mon2 monitor_interface=eth1
+    mon0 public_network=192.168.1.0/24
+    mon1 public_network=192.168.2.0/24
+    mon2 public_network=192.168.3.0/24
 
 .. _group_vars:
diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample
index 94c8389843..72a5bc6b8b 100644
--- a/group_vars/all.yml.sample
+++ b/group_vars/all.yml.sample
@@ -311,14 +311,6 @@ dummy:
 #rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
 
 ## Monitor options
-#
-# You must define either monitor_interface, monitor_address or monitor_address_block.
-# These variables must be defined at least in all.yml and overrided if needed (inventory host file or group_vars/*.yml).
-# Eg. If you want to specify for each monitor which address the monitor will bind to you can set it in your **inventory host file** by using 'monitor_address' variable.
-# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
-#monitor_interface: interface
-#monitor_address: x.x.x.x
-#monitor_address_block: subnet
 
 # set to either ipv4 or ipv6, whichever your network is using
 #ip_version: ipv4
@@ -519,7 +511,7 @@ dummy:
 # DOCKER #
 ##########
 #ceph_docker_image: "ceph/daemon-base"
-#ceph_docker_image_tag: latest-main
+#ceph_docker_image_tag: latest-reef
 #ceph_docker_registry: quay.io
 #ceph_docker_registry_auth: false
 # ceph_docker_registry_username:
@@ -562,64 +554,6 @@ dummy:
 #docker_pull_timeout: "300s"
 
 
-#############
-# OPENSTACK #
-#############
-#openstack_config: false
-# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
-# `pg_num` and `pgp_num` keys will be ignored, even if specified.
-# eg:
-# openstack_glance_pool:
-#   name: "images"
-#   rule_name: "my_replicated_rule"
-#   application: "rbd"
-#   pg_autoscale_mode: false
-#   pg_num: 16
-#   pgp_num: 16
-#   target_size_ratio: 0.2
-#openstack_glance_pool:
-#  name: "images"
-#  application: "rbd"
-#openstack_cinder_pool:
-#  name: "volumes"
-#  application: "rbd"
-#openstack_nova_pool:
-#  name: "vms"
-#  application: "rbd"
-#openstack_cinder_backup_pool:
-#  name: "backups"
-#  application: "rbd"
-#openstack_gnocchi_pool:
-#  name: "metrics"
-#  application: "rbd"
-#openstack_cephfs_data_pool:
-#  name: "manila_data"
-#  application: "cephfs"
-#openstack_cephfs_metadata_pool:
-#  name: "manila_metadata"
-#  application: "cephfs"
-#openstack_pools:
-#  - "{{ openstack_glance_pool }}"
-#  - "{{ openstack_cinder_pool }}"
-#  - "{{ openstack_nova_pool }}"
-#  - "{{ openstack_cinder_backup_pool }}"
-#  - "{{ openstack_gnocchi_pool }}"
-#  - "{{ openstack_cephfs_data_pool }}"
-#  - "{{ openstack_cephfs_metadata_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-#openstack_keys:
-#  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-#  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-#  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-#  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-#  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
 #############
 # DASHBOARD #
 #############
diff --git a/group_vars/exporters.yml.sample b/group_vars/exporters.yml.sample
index 9bb126b901..b9b46999d6 100644
--- a/group_vars/exporters.yml.sample
+++ b/group_vars/exporters.yml.sample
@@ -14,6 +14,7 @@ dummy:
 #ceph_exporter_addr: "0.0.0.0"
 #ceph_exporter_port: 9926
 #ceph_exporter_stats_period: 5 # seconds
+#ceph_exporter_prio_limit: 5
 
 ##########
 # DOCKER #
@@ -23,8 +24,10 @@ dummy:
 #ceph_exporter_container_params:
 #  args:
 #    - -f
+#    - -n=client.ceph-exporter
 #    - --sock-dir=/var/run/ceph
 #    - --addrs={{ ceph_exporter_addr }}
 #    - --port={{ ceph_exporter_port }}
 #    - --stats-period={{ ceph_exporter_stats_period }}
+#    - --prio-limit={{ ceph_exporter_prio_limit }}
 
diff --git a/group_vars/mons.yml.sample b/group_vars/mons.yml.sample
index c6892208b7..272d3e27d0 100644
--- a/group_vars/mons.yml.sample
+++ b/group_vars/mons.yml.sample
@@ -62,8 +62,8 @@ dummy:
 #    - --default-mon-cluster-log-to-stderr=true
 #    - -i={{ monitor_name }}
 #    - --mon-data=/var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
-#    - --public-addr={{ _current_monitor_address }}
-#    - --mon-initial-members={{ groups[mon_group_name] | join(',') }}
+#    - --public-addr={{ _monitor_addresses[inventory_hostname] }}
+#    - --mon-initial-members={{ groups[mon_group_name] | map('extract', hostvars, 'ansible_facts') | map(attribute='hostname') | join(',') }}
 
 
 ###########
diff --git a/infrastructure-playbooks/cephadm.yml b/infrastructure-playbooks/cephadm.yml
index 95c3896da9..b08e7f21d8 100644
--- a/infrastructure-playbooks/cephadm.yml
+++ b/infrastructure-playbooks/cephadm.yml
@@ -30,15 +30,6 @@
        run_once: true
         when: groups[mgr_group_name] is undefined or groups[mgr_group_name] | length == 0
 
-      - name: Validate monitor network configuration
-        ansible.builtin.fail:
-          msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided"
-        when:
-          - mon_group_name in group_names
-          - monitor_address == 'x.x.x.x'
-          - monitor_address_block == 'subnet'
-          - monitor_interface == 'interface'
-
       - name: Validate dashboard configuration
         when: dashboard_enabled | bool
         run_once: true
@@ -158,7 +149,7 @@
 
     - name: Bootstrap the new cluster
       cephadm_bootstrap:
-        mon_ip: "{{ _current_monitor_address }}"
+        mon_ip: "{{ _monitor_addresses[inventory_hostname] }}"
         image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
         docker: "{{ true if container_binary == 'docker' else false }}"
         pull: false
diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml
index 8ce5d44ac8..6aec239e48 100644
--- a/infrastructure-playbooks/rolling_update.yml
+++ b/infrastructure-playbooks/rolling_update.yml
@@ -24,14 +24,6 @@
         private: false
 
   tasks:
-    - name: Warn user regarding current status of stable-8.0
-      ansible.builtin.fail:
-        msg: >
-          "stable-8.0 branch isn't ready.
-          This is most likely going to break your deployment.
-          Set `i_have_understood_that_stable8_is_not_ready=yes if you know what you are doing.`"
-      when: i_have_understood_that_stable8_is_not_ready != 'yes'
-
     - name: Exit playbook, if user did not mean to upgrade cluster
       ansible.builtin.fail:
         msg: >
@@ -143,12 +135,40 @@
       register: ceph_version
       changed_when: false
 
-    - name: check ceph release being deployed
+    - name: Check ceph release being deployed
       ansible.builtin.fail:
         msg: "This version of ceph-ansible is intended for upgrading to Ceph Reef only."
       when: "'reef' not in ceph_version.stdout.split()"
 
+- name: Ensure cluster config is applied
+  hosts: mons[0]
+  become: true
+  gather_facts: false
+  any_errors_fatal: true
+  tasks:
+    - name: Import default role
+      ansible.builtin.import_role:
+        name: ceph-defaults
+
+    - name: Import ceph-facts role
+      ansible.builtin.import_role:
+        name: ceph-facts
+        tasks_from: container_binary.yml
+
+    - name: Set cluster configs
+      ceph_config:
+        action: set
+        who: "{{ item.0.key }}"
+        option: "{{ item.1.key }}"
+        value: "{{ item.1.value }}"
+      when: item.1.value != omit
+      loop: "{{ ceph_cluster_conf | dict2dict }}"
+      environment:
+        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+
+
 - name: Upgrade ceph mon cluster
   tags: mons
   vars:
@@ -312,7 +332,7 @@
         delegate_facts: true
 
     - name: Non container | waiting for the monitor to join the quorum...
-      ansible.builtin.command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
+      ansible.builtin.command: ceph --cluster "{{ cluster }}" -m "{{ _monitor_addresses[groups['mons'][0]] }}" quorum_status --format json
       register: ceph_health_raw
       until:
         - ceph_health_raw.rc == 0
@@ -325,7 +345,7 @@
 
     - name: Container | waiting for the containerized monitor to join the quorum...
       ansible.builtin.command: >
-        {{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
+        {{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ _monitor_addresses[groups['mons'][0]] }}" quorum_status --format json
       register: ceph_health_raw
       until:
         - ceph_health_raw.rc == 0
@@ -1169,17 +1189,19 @@
         ansible.builtin.meta: end_play
         when: not containerized_deployment | bool
 
-      - name: Stop the ceph-exporter service
+      - name: Stop the ceph-exporter service  # noqa: ignore-errors
        ansible.builtin.systemd:
           name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}"
           state: stopped
+        ignore_errors: true
 
       # it needs to be done in a separate task otherwise the stop just before doesn't work.
-      - name: Mask and disable the ceph-exporter service
+      - name: Mask and disable the ceph-exporter service  # noqa: ignore-errors
        ansible.builtin.systemd:
           name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}"
           enabled: false
           masked: true
+        ignore_errors: true
 
     - name: Import ceph-defaults role
       ansible.builtin.import_role:
         name: ceph-defaults
@@ -1210,7 +1232,7 @@
         name: ceph-facts
         tasks_from: container_binary.yml
 
-    - name: container | disallow pre-reef OSDs and enable all new reef-only functionality
+    - name: Container | disallow pre-reef OSDs and enable all new reef-only functionality
       ansible.builtin.command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release reef"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
@@ -1219,7 +1241,7 @@
         - containerized_deployment | bool
         - groups.get(mon_group_name, []) | length > 0
 
-    - name: non container | disallow pre-reef OSDs and enable all new reef-only functionality
+    - name: Non container | disallow pre-reef OSDs and enable all new reef-only functionality
       ansible.builtin.command: "ceph --cluster {{ cluster }} osd require-osd-release reef"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: true
diff --git a/library/cephadm_adopt.py b/library/cephadm_adopt.py
index 723e0a46ce..786045130b 100644
--- a/library/cephadm_adopt.py
+++ b/library/cephadm_adopt.py
@@ -84,7 +84,7 @@
   cephadm_adopt:
     name: mon.foo
     style: legacy
-    image: quay.io/ceph/daemon-base:latest-main-devel
+    image: quay.io/ceph/daemon-base:latest-reef-devel
     pull: false
     firewalld: false
 
@@ -93,7 +93,7 @@
     name: mon.foo
     style: legacy
   environment:
-    CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-main-devel
+    CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-reef-devel
 '''
 
 RETURN = '''# '''
diff --git a/library/cephadm_bootstrap.py b/library/cephadm_bootstrap.py
index f7d2cbdca5..7faefc7a7d 100644
--- a/library/cephadm_bootstrap.py
+++ b/library/cephadm_bootstrap.py
@@ -124,7 +124,7 @@
   cephadm_bootstrap:
     mon_ip: 192.168.42.1
     fsid: 3c9ba63a-c7df-4476-a1e7-317dfc711f82
-    image: quay.io/ceph/daemon-base:latest-main-devel
+    image: quay.io/ceph/daemon-base:latest-reef-devel
     dashboard: false
     monitoring: false
     firewalld: false
@@ -133,7 +133,7 @@
   cephadm_bootstrap:
     mon_ip: 192.168.42.1
   environment:
-    CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-main-devel
+    CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-reef-devel
 '''
 
 RETURN = '''# '''
diff --git a/roles/ceph-config/templates/ceph.conf.j2 b/roles/ceph-config/templates/ceph.conf.j2
index af5d1cb5de..c68908b415 100644
--- a/roles/ceph-config/templates/ceph.conf.j2
+++ b/roles/ceph-config/templates/ceph.conf.j2
@@ -13,11 +13,11 @@ auth client required = none
 fsid = {{ fsid }}
 mon host = {% if nb_mon > 0 %}
-{% for host in _monitor_addresses -%}
+{% for name, addr in _monitor_addresses.items() -%}
 {% if mon_host_v1.enabled | bool %}
-{% set _v1 = ',v1:' + host.addr + mon_host_v1.suffix %}
+{% set _v1 = ',v1:' + addr + mon_host_v1.suffix %}
 {% endif %}
-[{{ "v2:" + host.addr + mon_host_v2.suffix }}{{ _v1 | default('') }}]
+[{{ "v2:" + addr + mon_host_v2.suffix }}{{ _v1 | default('') }}]
 {%- if not loop.last -%},{%- endif %}
 {%- endfor %}
 {% elif nb_mon == 0 %}
diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml
index bd01a91ecc..c22f63b5e6 100644
--- a/roles/ceph-defaults/defaults/main.yml
+++ b/roles/ceph-defaults/defaults/main.yml
@@ -303,14 +303,6 @@ rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be w
 rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
 
 ## Monitor options
-#
-# You must define either monitor_interface, monitor_address or monitor_address_block.
-# These variables must be defined at least in all.yml and overrided if needed (inventory host file or group_vars/*.yml).
-# Eg. If you want to specify for each monitor which address the monitor will bind to you can set it in your **inventory host file** by using 'monitor_address' variable.
-# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
-monitor_interface: interface
-monitor_address: x.x.x.x
-monitor_address_block: subnet
 
 # set to either ipv4 or ipv6, whichever your network is using
 ip_version: ipv4
@@ -511,7 +503,7 @@ ceph_tcmalloc_max_total_thread_cache: 134217728
 # DOCKER #
 ##########
 ceph_docker_image: "ceph/daemon-base"
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
 ceph_docker_registry: quay.io
 ceph_docker_registry_auth: false
 # ceph_docker_registry_username:
@@ -554,64 +546,6 @@ docker_pull_retry: 3
 docker_pull_timeout: "300s"
 
 
-#############
-# OPENSTACK #
-#############
-openstack_config: false
-# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
-# `pg_num` and `pgp_num` keys will be ignored, even if specified.
-# eg:
-# openstack_glance_pool:
-#   name: "images"
-#   rule_name: "my_replicated_rule"
-#   application: "rbd"
-#   pg_autoscale_mode: false
-#   pg_num: 16
-#   pgp_num: 16
-#   target_size_ratio: 0.2
-openstack_glance_pool:
-  name: "images"
-  application: "rbd"
-openstack_cinder_pool:
-  name: "volumes"
-  application: "rbd"
-openstack_nova_pool:
-  name: "vms"
-  application: "rbd"
-openstack_cinder_backup_pool:
-  name: "backups"
-  application: "rbd"
-openstack_gnocchi_pool:
-  name: "metrics"
-  application: "rbd"
-openstack_cephfs_data_pool:
-  name: "manila_data"
-  application: "cephfs"
-openstack_cephfs_metadata_pool:
-  name: "manila_metadata"
-  application: "cephfs"
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
-  - "{{ openstack_nova_pool }}"
-  - "{{ openstack_cinder_backup_pool }}"
-  - "{{ openstack_gnocchi_pool }}"
-  - "{{ openstack_cephfs_data_pool }}"
-  - "{{ openstack_cephfs_metadata_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-openstack_keys:
-  - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-  - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-  - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-  - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-  - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
 #############
 # DASHBOARD #
 #############
diff --git a/roles/ceph-facts/tasks/set_monitor_address.yml b/roles/ceph-facts/tasks/set_monitor_address.yml
index 8f3aa57b5f..142b56a2a1 100644
--- a/roles/ceph-facts/tasks/set_monitor_address.yml
+++ b/roles/ceph-facts/tasks/set_monitor_address.yml
@@ -1,59 +1,14 @@
 ---
-- name: Set_fact _monitor_addresses to monitor_address_block ipv4
+- name: Set_fact _monitor_addresses - ipv4
   ansible.builtin.set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first}] }}"
+    _monitor_addresses: "{{ _monitor_addresses | default({}) | combine({item: hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['public_network'].split(',')) | first}) }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - hostvars[item]['monitor_address_block'] is defined
-    - hostvars[item]['monitor_address_block'] != 'subnet'
     - ip_version == 'ipv4'
 
-- name: Set_fact _monitor_addresses to monitor_address_block ipv6
+- name: Set_fact _monitor_addresses - ipv6
   ansible.builtin.set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ansible.utils.ipwrap}] }}"
+    _monitor_addresses: "{{ _monitor_addresses | default({}) | combine({item: hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['public_network'].split(',')) | last | ansible.utils.ipwrap}) }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - hostvars[item]['monitor_address_block'] is defined
-    - hostvars[item]['monitor_address_block'] != 'subnet'
     - ip_version == 'ipv6'
-
-- name: Set_fact _monitor_addresses to monitor_address
-  ansible.builtin.set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['monitor_address'] | ansible.utils.ipwrap}] }}"
-  with_items: "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - hostvars[item]['monitor_address'] is defined
-    - hostvars[item]['monitor_address'] != 'x.x.x.x'
-
-- name: Set_fact _monitor_addresses to monitor_interface - ipv4
-  ansible.builtin.set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface'] | replace('-', '_'))][ip_version]['address'] | ansible.utils.ipwrap}] }}"
-  with_items: "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - ip_version == 'ipv4'
-    - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
-    - hostvars[item]['monitor_address'] | default('x.x.x.x') == 'x.x.x.x'
-    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-
-- name: Set_fact _monitor_addresses to monitor_interface - ipv6
-  ansible.builtin.set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface'] | replace('-', '_'))][ip_version][0]['address'] | ansible.utils.ipwrap}] }}"
-  with_items: "{{ groups.get(mon_group_name, []) }}"
-  when:
-    - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
-    - ip_version == 'ipv6'
-    - hostvars[item]['monitor_address_block'] | default('subnet') == 'subnet'
-    - hostvars[item]['monitor_address'] | default('x.x.x.x') == 'x.x.x.x'
-    - hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-
-- name: Set_fact _current_monitor_address
-  ansible.builtin.set_fact:
-    _current_monitor_address: "{{ item.addr }}"
-  with_items: "{{ _monitor_addresses }}"
-  when:
-    - (inventory_hostname == item.name and not rolling_update | default(False) | bool)
-      or (rolling_update | default(False) | bool and item.name == groups.get(mon_group_name, [])[0])
diff --git a/roles/ceph-mon/defaults/main.yml b/roles/ceph-mon/defaults/main.yml
index 1e470036b2..ec3364529a 100644
--- a/roles/ceph-mon/defaults/main.yml
+++ b/roles/ceph-mon/defaults/main.yml
@@ -54,7 +54,7 @@ ceph_mon_container_params:
     - --default-mon-cluster-log-to-stderr=true
     - -i={{ monitor_name }}
     - --mon-data=/var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
-    - --public-addr={{ _current_monitor_address }}
+    - --public-addr={{ _monitor_addresses[inventory_hostname] }}
     - --mon-initial-members={{ groups[mon_group_name] | map('extract', hostvars, 'ansible_facts') | map(attribute='hostname') | join(',') }}
diff --git a/roles/ceph-mon/tasks/deploy_monitors.yml b/roles/ceph-mon/tasks/deploy_monitors.yml
index 1535418070..3c2a8e4a39 100644
--- a/roles/ceph-mon/tasks/deploy_monitors.yml
+++ b/roles/ceph-mon/tasks/deploy_monitors.yml
@@ -130,6 +130,7 @@
     mode: "0600"
   delegate_to: "{{ item }}"
   loop: "{{ groups[mon_group_name] }}"
+  no_log: "{{ no_log_on_ceph_key_tasks }}"
 
 - name: Import admin keyring into mon keyring
   ceph_authtool:
@@ -141,8 +142,7 @@
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-  no_log: false
-#  no_log: "{{ no_log_on_ceph_key_tasks }}"
+  no_log: "{{ no_log_on_ceph_key_tasks }}"
   when:
     - cephx | bool
 
@@ -158,13 +158,13 @@
   ansible.builtin.command: >
     {{ ceph_monmaptool_cmd }}
     --create
-    {% for host in _monitor_addresses -%}
+    {% for name, addr in _monitor_addresses.items() -%}
     --addv
-    {{ host.name }}
+    {{ hostvars[name]['ansible_facts']['hostname'] }}
     {% if mon_host_v1.enabled | bool %}
-    {% set _v1 = ',v1:' + host.addr + mon_host_v1.suffix %}
+    {% set _v1 = ',v1:' + addr + mon_host_v1.suffix %}
     {% endif %}
-    [{{ "v2:" + host.addr + mon_host_v2.suffix }}{{ _v1 | default('') }}]
+    [{{ "v2:" + addr + mon_host_v2.suffix }}{{ _v1 | default('') }}]
     {# {%- if not loop.last -%},{%- endif %} #}
     {%- endfor %}
     --enable-all-features
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index f51fc98459..8987f57cfc 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -99,13 +99,3 @@
   ansible.builtin.include_tasks: crush_rules.yml
   when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
   tags: wait_all_osds_up
-
-# Create the pools listed in openstack_pools
-- name: Include openstack_config.yml
-  ansible.builtin.include_tasks: openstack_config.yml
-  when:
-    - not add_osd | bool
-    - not rolling_update | default(False) | bool
-    - openstack_config | bool
-    - inventory_hostname == groups[osd_group_name] | last
-  tags: wait_all_osds_up
diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml
deleted file mode 100644
index 6b276c2c0f..0000000000
--- a/roles/ceph-osd/tasks/openstack_config.yml
+++ /dev/null
@@ -1,68 +0,0 @@
----
-- name: Pool related tasks
-  block:
-    - name: Create openstack pool(s)
-      ceph_pool:
-        name: "{{ item.name }}"
-        cluster: "{{ cluster }}"
-        pg_num: "{{ item.pg_num | default(omit) }}"
-        pgp_num: "{{ item.pgp_num | default(omit) }}"
-        size: "{{ item.size | default(omit) }}"
-        min_size: "{{ item.min_size | default(omit) }}"
-        pool_type: "{{ item.type | default('replicated') }}"
-        rule_name: "{{ item.rule_name | default(omit) }}"
-        erasure_profile: "{{ item.erasure_profile | default(omit) }}"
-        pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
-        target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
-        application: "{{ item.application | default(omit) }}"
-      with_items: "{{ openstack_pools }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-
-- name: Create openstack cephx key(s)
-  when:
-    - cephx | bool
-    - openstack_config | bool
-  block:
-    - name: Generate keys
-      ceph_key:
-        name: "{{ item.name }}"
-        caps: "{{ item.caps }}"
-        secret: "{{ item.key | default('') }}"
-        cluster: "{{ cluster }}"
-        mode: "{{ item.mode | default(ceph_keyring_permissions) }}"
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      with_items: "{{ openstack_keys }}"
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-      no_log: "{{ no_log_on_ceph_key_tasks }}"
-
-    - name: Get keys from monitors
-      ceph_key:
-        name: "{{ item.name }}"
-        cluster: "{{ cluster }}"
-        output_format: plain
-        state: info
-      environment:
-        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-      register: _osp_keys
-      with_items: "{{ openstack_keys }}"
-      delegate_to: "{{ groups.get(mon_group_name)[0] }}"
-      no_log: "{{ no_log_on_ceph_key_tasks }}"
-
-    - name: Copy ceph key(s) if needed
-      ansible.builtin.copy:
-        dest: "/etc/ceph/{{ cluster }}.{{ item.0.item.name }}.keyring"
-        content: "{{ item.0.stdout + '\n' }}"
-        owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
-        group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
-        mode: "{{ item.0.item.mode | default(ceph_keyring_permissions) }}"
-      with_nested:
-        - "{{ _osp_keys.results }}"
-        - "{{ groups[mon_group_name] }}"
-      delegate_to: "{{ item.1 }}"
-      no_log: "{{ no_log_on_ceph_key_tasks }}"
diff --git a/roles/ceph-validate/tasks/check_eth_mon.yml b/roles/ceph-validate/tasks/check_eth_mon.yml
deleted file mode 100644
index 21a58f3693..0000000000
--- a/roles/ceph-validate/tasks/check_eth_mon.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Check if network interface exists
-  ansible.builtin.fail:
-    msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}"
-  when: monitor_interface not in ansible_facts['interfaces']
-
-- name: Check if network interface is active
-  ansible.builtin.fail:
-    msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}"
-  when: not hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['active']
-
-- name: Check if network interface has an IPv4 address
-  ansible.builtin.fail:
-    msg: "{{ monitor_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
-  when:
-    - ip_version == "ipv4"
-    - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv4'] is not defined
-
-- name: Check if network interface has an IPv6 address
-  ansible.builtin.fail:
-    msg: "{{ monitor_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
-  when:
-    - ip_version == "ipv6"
-    - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv6'] is not defined
diff --git a/roles/ceph-validate/tasks/check_ipaddr_mon.yml b/roles/ceph-validate/tasks/check_ipaddr_mon.yml
index 9183b385bf..cf4cc71dbd 100644
--- a/roles/ceph-validate/tasks/check_ipaddr_mon.yml
+++ b/roles/ceph-validate/tasks/check_ipaddr_mon.yml
@@ -1,5 +1,5 @@
 ---
-- name: Check if network interface has an IP address in `monitor_address_block`
+- name: Check if network interface has an IP address in public_network
   ansible.builtin.fail:
-    msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
-  when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0
+    msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ public_network }}"
+  when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['public_network'].split(',')) | length == 0
diff --git a/roles/ceph-validate/tasks/check_pools.yml b/roles/ceph-validate/tasks/check_pools.yml
index bca9d26ff4..0acf7c9572 100644
--- a/roles/ceph-validate/tasks/check_pools.yml
+++ b/roles/ceph-validate/tasks/check_pools.yml
@@ -3,7 +3,6 @@
   ansible.builtin.fail:
     msg: "You must set a target_size_ratio value on following pool: {{ item.name }}."
   with_items:
-    - "{{ openstack_pools | default([]) }}"
     - "{{ cephfs_pools | default([]) }}"
     - "{{ pools | default([]) }}"
   when:
diff --git a/roles/ceph-validate/tasks/check_repository.yml b/roles/ceph-validate/tasks/check_repository.yml
index 01067dc441..516ca81fb8 100644
--- a/roles/ceph-validate/tasks/check_repository.yml
+++ b/roles/ceph-validate/tasks/check_repository.yml
@@ -12,8 +12,8 @@
 
 - name: Validate ceph_repository_community
   ansible.builtin.fail:
-    msg: "ceph_stable_release must be 'squid'"
+    msg: "ceph_stable_release must be 'reef'"
   when:
     - ceph_origin == 'repository'
     - ceph_repository == 'community'
-    - ceph_stable_release not in ['squid']
+    - ceph_stable_release not in ['reef']
diff --git a/roles/ceph-validate/tasks/main.yml b/roles/ceph-validate/tasks/main.yml
index f0caab90f2..885ffb36ae 100644
--- a/roles/ceph-validate/tasks/main.yml
+++ b/roles/ceph-validate/tasks/main.yml
@@ -11,15 +11,6 @@
     msg: "osd_objectstore must be 'bluestore''"
   when: osd_objectstore not in ['bluestore']
 
-- name: Validate monitor network configuration
-  ansible.builtin.fail:
-    msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided"
-  when:
-    - mon_group_name in group_names
-    - monitor_address == 'x.x.x.x'
-    - monitor_address_block == 'subnet'
-    - monitor_interface == 'interface'
-
 - name: Validate radosgw network configuration
   ansible.builtin.fail:
     msg: "Either radosgw_address, radosgw_address_block or radosgw_interface must be provided"
@@ -107,22 +98,6 @@
     - osd_group_name in group_names
     - not osd_auto_discovery | default(False) | bool
 
-- name: Include check_eth_mon.yml
-  ansible.builtin.include_tasks: check_eth_mon.yml
-  when:
-    - mon_group_name in group_names
-    - monitor_interface != "dummy"
-    - monitor_address == "x.x.x.x"
-    - monitor_address_block == "subnet"
-
-- name: Include check_ipaddr_mon.yml
-  ansible.builtin.include_tasks: check_ipaddr_mon.yml
-  when:
-    - mon_group_name in group_names
-    - monitor_interface == "interface"
-    - monitor_address == "x.x.x.x"
-    - monitor_address_block != "subnet"
-
 - name: Include check_eth_rgw.yml
   ansible.builtin.include_tasks: check_eth_rgw.yml
   when:
diff --git a/tests/conftest.py b/tests/conftest.py
index 20e871d1aa..69de7ac69d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -188,6 +188,12 @@ def node(host, request):
     if request.node.get_closest_marker("dashboard") and sanitized_group_names == ['clients']:
         pytest.skip('Not a valid test for client node')
 
+    if request.node.get_closest_marker("ceph_crash") and sanitized_group_names == ['iscsigws']:
+        pytest.skip('Not a valid test for iscsigws node')
+
+    if request.node.get_closest_marker("ceph_exporter") and sanitized_group_names == ['iscsigws']:
+        pytest.skip('Not a valid test for iscsigws node')
+
     data = dict(
         vars=ansible_vars,
         docker=docker,
diff --git a/tests/functional/add-mdss/container/group_vars/all b/tests/functional/add-mdss/container/group_vars/all
index abacaac81d..cd9abe3971 100644
--- a/tests/functional/add-mdss/container/group_vars/all
+++ b/tests/functional/add-mdss/container/group_vars/all
@@ -6,7 +6,6 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.63.0/24"
 cluster_network: "192.168.64.0/24"
-monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
@@ -29,4 +28,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
diff --git a/tests/functional/add-mdss/group_vars/all b/tests/functional/add-mdss/group_vars/all
index 650540b496..5bc3323899 100644
--- a/tests/functional/add-mdss/group_vars/all
+++ b/tests/functional/add-mdss/group_vars/all
@@ -4,7 +4,6 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.61.0/24"
 cluster_network: "192.168.62.0/24"
-monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
diff --git a/tests/functional/add-mgrs/container/group_vars/all b/tests/functional/add-mgrs/container/group_vars/all
index 34065af173..e1ffbe3960 100644
--- a/tests/functional/add-mgrs/container/group_vars/all
+++ b/tests/functional/add-mgrs/container/group_vars/all
@@ -6,7 +6,6 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.75.0/24"
 cluster_network: "192.168.76.0/24"
-monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
@@ -29,4 +28,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
diff --git a/tests/functional/add-mgrs/group_vars/all b/tests/functional/add-mgrs/group_vars/all
index fb9bde0f09..14a9601f1f 100644
--- a/tests/functional/add-mgrs/group_vars/all
+++ b/tests/functional/add-mgrs/group_vars/all
@@ -1,10 +1,9 @@
 ---
 ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
 cluster: ceph
 public_network: "192.168.73.0/24"
 cluster_network: "192.168.74.0/24"
-monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
diff --git a/tests/functional/add-mons/container/group_vars/all b/tests/functional/add-mons/container/group_vars/all
index 1fff68805f..ada3299290 100644
--- a/tests/functional/add-mons/container/group_vars/all
+++ b/tests/functional/add-mons/container/group_vars/all
@@ -6,7 +6,6 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.55.0/24"
 cluster_network: "192.168.56.0/24"
-monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
@@ -29,4 +28,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
diff --git a/tests/functional/add-mons/group_vars/all b/tests/functional/add-mons/group_vars/all
index 7988877c04..3686208dd6 100644
--- a/tests/functional/add-mons/group_vars/all
+++ b/tests/functional/add-mons/group_vars/all
@@ -1,10 +1,9 @@
 ---
 ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
 cluster: ceph
 public_network: "192.168.53.0/24"
 cluster_network: "192.168.54.0/24"
-monitor_interface: eth1
 radosgw_interface: eth1
 journal_size: 100
 osd_objectstore: "bluestore"
diff --git a/tests/functional/add-osds/container/group_vars/all b/tests/functional/add-osds/container/group_vars/all
index 39022d7a60..74e208ca91 100644
--- a/tests/functional/add-osds/container/group_vars/all
+++ b/tests/functional/add-osds/container/group_vars/all
@@ -6,7 +6,6 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.55.0/24"
 cluster_network: "192.168.56.0/24"
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
@@ -29,4 +28,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
diff --git a/tests/functional/add-osds/group_vars/all b/tests/functional/add-osds/group_vars/all
index ef109c9b8b..2b6e2b8b74 100644
--- a/tests/functional/add-osds/group_vars/all
+++ b/tests/functional/add-osds/group_vars/all
@@ -4,7 +4,6 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.53.0/24"
 cluster_network: "192.168.54.0/24"
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
diff --git a/tests/functional/add-rbdmirrors/container/group_vars/all b/tests/functional/add-rbdmirrors/container/group_vars/all
index c275d4f084..112216f331 100644
--- a/tests/functional/add-rbdmirrors/container/group_vars/all
+++ b/tests/functional/add-rbdmirrors/container/group_vars/all
@@ -6,7 +6,6 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.67.0/24"
 cluster_network: "192.168.68.0/24"
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
@@ -29,4 +28,4 @@ ceph_conf_overrides:
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
diff --git a/tests/functional/add-rbdmirrors/group_vars/all b/tests/functional/add-rbdmirrors/group_vars/all
index 119d13167e..6b6d1dc9b5 100644
--- a/tests/functional/add-rbdmirrors/group_vars/all
+++ b/tests/functional/add-rbdmirrors/group_vars/all
@@ -4,7 +4,6 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.65.0/24"
 cluster_network: "192.168.66.0/24"
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
diff --git a/tests/functional/add-rgws/container/group_vars/all b/tests/functional/add-rgws/container/group_vars/all
index 1de1eea6f8..1c02c81392 100644
--- a/tests/functional/add-rgws/container/group_vars/all
+++ b/tests/functional/add-rgws/container/group_vars/all
@@ -6,7 +6,6 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.71.0/24"
 cluster_network: "192.168.72.0/24"
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
@@ -31,4 +30,4 @@ rgw_bucket_default_quota_max_objects: 1638400
 dashboard_enabled: False
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
diff --git a/tests/functional/add-rgws/group_vars/all b/tests/functional/add-rgws/group_vars/all
index e7326ec8ec..2aae860593 100644
--- a/tests/functional/add-rgws/group_vars/all
+++ b/tests/functional/add-rgws/group_vars/all
@@ -4,7 +4,6 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.69.0/24"
 cluster_network: "192.168.70.0/24"
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
diff --git a/tests/functional/all-in-one/container/group_vars/all b/tests/functional/all-in-one/container/group_vars/all
index daa915a656..bed6ae593b 100644
--- a/tests/functional/all-in-one/container/group_vars/all
+++ b/tests/functional/all-in-one/container/group_vars/all
@@ -4,7 +4,6 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_num_instances: 2
 ceph_mon_docker_subnet: "{{ public_network }}"
@@ -12,7 +11,6 @@ public_network: "192.168.19.0/24"
 cluster_network: "192.168.20.0/24"
 rgw_override_bucket_index_max_shards: 16
 rgw_bucket_default_quota_max_objects: 1638400
-openstack_config: True
 dashboard_enabled: false
 ceph_conf_overrides:
   global:
@@ -45,4 +43,4 @@ lvm_volumes:
     db_vg: journals
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
\ No newline at end of file
diff --git a/tests/functional/all-in-one/group_vars/all b/tests/functional/all-in-one/group_vars/all
index f50a1bba03..e4967a8b14 100644
--- a/tests/functional/all-in-one/group_vars/all
+++ b/tests/functional/all-in-one/group_vars/all
@@ -2,10 +2,8 @@
 containerized_deployment: False
 ceph_origin: repository
 ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
-openstack_config: True
 dashboard_enabled: False
 public_network: "192.168.17.0/24"
 cluster_network: "192.168.18.0/24"
diff --git a/tests/functional/all_daemons/container/group_vars/all b/tests/functional/all_daemons/container/group_vars/all
index 6dfcb137c9..65d71f3849 100644
--- a/tests/functional/all_daemons/container/group_vars/all
+++ b/tests/functional/all_daemons/container/group_vars/all
@@ -4,7 +4,6 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.17.0/24"
@@ -18,18 +17,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 docker_pull_timeout: 600s
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
@@ -38,7 +25,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
 node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
diff --git a/tests/functional/all_daemons/container/hosts b/tests/functional/all_daemons/container/hosts
index fa75c7d795..51d488ccbd 100644
--- a/tests/functional/all_daemons/container/hosts
+++ b/tests/functional/all_daemons/container/hosts
@@ -1,7 +1,7 @@
 [mons]
-mon0 monitor_address=192.168.17.10
-mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
-mon2 monitor_address=192.168.17.12
+mon0
+mon1
+mon2
 
 [mgrs]
 mgr0
diff --git a/tests/functional/all_daemons/group_vars/all b/tests/functional/all_daemons/group_vars/all
index efd2430cf7..75c50ecb0e 100644
--- a/tests/functional/all_daemons/group_vars/all
+++ b/tests/functional/all_daemons/group_vars/all
@@ -11,20 +11,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 mds_max_mds: 2
@@ -36,3 +22,4 @@ prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
 grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
 grafana_server_group_name: ceph_monitoring
+dashboard_enabled: false
diff --git a/tests/functional/all_daemons/group_vars/mons b/tests/functional/all_daemons/group_vars/mons
index f6ab9a5415..bc6941bd2a 100644
--- a/tests/functional/all_daemons/group_vars/mons
+++ b/tests/functional/all_daemons/group_vars/mons
@@ -1,6 +1,6 @@
 ---
-create_crush_tree: True
-crush_rule_config: True
+create_crush_tree: true
+crush_rule_config: true
 crush_rule_hdd:
   name: HDD
   root: default
@@ -8,4 +8,4 @@ crush_rule_hdd:
   class: hdd
   default: true
 crush_rules:
-  - "{{ crush_rule_hdd }}"
\ No newline at end of file
+  - "{{ crush_rule_hdd }}"
diff --git a/tests/functional/all_daemons/group_vars/nfss b/tests/functional/all_daemons/group_vars/nfss
index fc280e2513..826bdfecd6 100644
--- a/tests/functional/all_daemons/group_vars/nfss
+++ b/tests/functional/all_daemons/group_vars/nfss
@@ -5,6 +5,6 @@ ganesha_conf_overrides: |
         CACHEINODE {
                 Entries_HWMark = 100000;
         }
-nfs_ganesha_stable: true
-nfs_ganesha_dev: false
+nfs_ganesha_stable: false
+nfs_ganesha_dev: true
 nfs_ganesha_flavor: "ceph_main"
diff --git a/tests/functional/all_daemons/hosts b/tests/functional/all_daemons/hosts
index aeb10eed6f..8e2019776a 100644
--- a/tests/functional/all_daemons/hosts
+++ b/tests/functional/all_daemons/hosts
@@ -1,7 +1,7 @@
 [mons]
-mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
-mon2 monitor_address=192.168.1.12
+mon0
+mon1
+mon2
 
 [mgrs]
 mgr0
diff --git a/tests/functional/all_daemons/hosts-switch-to-containers b/tests/functional/all_daemons/hosts-switch-to-containers
index fabb659f0b..ceae369ebc 100644
--- a/tests/functional/all_daemons/hosts-switch-to-containers
+++ b/tests/functional/all_daemons/hosts-switch-to-containers
@@ -2,9 +2,9 @@
 docker=True
 
 [mons]
-mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
-mon2 monitor_address=192.168.1.12
+mon0
+mon1
+mon2
 
 [mgrs]
 mgr0
diff --git a/tests/functional/all_daemons_ipv6/container/group_vars/all b/tests/functional/all_daemons_ipv6/container/group_vars/all
index d0c9ee6bcf..4e3ef7de40 100644
--- a/tests/functional/all_daemons_ipv6/container/group_vars/all
+++ b/tests/functional/all_daemons_ipv6/container/group_vars/all
@@ -4,7 +4,6 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ip_version: ipv6
@@ -19,18 +18,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 docker_pull_timeout: 600s
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
@@ -39,7 +26,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B!
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
 node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
diff --git a/tests/functional/all_daemons_ipv6/container/hosts b/tests/functional/all_daemons_ipv6/container/hosts
index 81cf5c2dc0..51d488ccbd 100644
--- a/tests/functional/all_daemons_ipv6/container/hosts
+++ b/tests/functional/all_daemons_ipv6/container/hosts
@@ -1,7 +1,7 @@
 [mons]
-mon0 monitor_address="fdec:f1fb:29cd:6940::10"
-mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
-mon2 monitor_address="fdec:f1fb:29cd:6940::12"
+mon0
+mon1
+mon2
 
 [mgrs]
 mgr0
diff --git a/tests/functional/all_daemons_ipv6/group_vars/all b/tests/functional/all_daemons_ipv6/group_vars/all
index 204219f19e..1748266d13 100644
--- a/tests/functional/all_daemons_ipv6/group_vars/all
+++ b/tests/functional/all_daemons_ipv6/group_vars/all
@@ -1,6 +1,6 @@
 ---
 ceph_origin: repository
-ceph_repository: dev
+ceph_repository: community
 ip_version: ipv6
 public_network: "fdec:f1fb:29cd:6940::/64"
 cluster_network: "fdec:f1fb:29cd:7120::/64"
@@ -12,20 +12,6 @@ ceph_conf_overrides:
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
     mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
-  name: "images"
-  size: 1
-  application: rbd
-  target_size_ratio: 0.2
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-  application: rbd
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 mds_max_mds: 2
diff --git a/tests/functional/all_daemons_ipv6/hosts b/tests/functional/all_daemons_ipv6/hosts
index 78b212478e..8e2019776a 100644
--- a/tests/functional/all_daemons_ipv6/hosts
+++ b/tests/functional/all_daemons_ipv6/hosts
@@ -1,7 +1,7 @@
 [mons]
-mon0 monitor_address="fdec:f1fb:29cd:6940::10"
-mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
-mon2 monitor_address="fdec:f1fb:29cd:6940::12"
+mon0
+mon1
+mon2
 
 [mgrs]
 mgr0
diff --git a/tests/functional/cephadm/group_vars/all b/tests/functional/cephadm/group_vars/all
index 2e074a2716..2203adc5e6 100644
--- a/tests/functional/cephadm/group_vars/all
+++ b/tests/functional/cephadm/group_vars/all
@@ -1,9 +1,8 @@
 ---
-monitor_interface: eth1
 public_network: "192.168.30.0/24"
 cluster_network: "192.168.31.0/24"
 dashboard_admin_password: $sX!cD$rYU6qR^B!
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main-devel
+ceph_docker_image_tag: latest-reef-devel
 containerized_deployment: true
diff --git a/tests/functional/collocation/container/group_vars/all b/tests/functional/collocation/container/group_vars/all
index e6f0fab1ed..e293024ceb 100644
--- a/tests/functional/collocation/container/group_vars/all
+++ b/tests/functional/collocation/container/group_vars/all
@@ -4,7 +4,6 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_num_instances: 2
 ceph_mon_docker_subnet: "{{ public_network }}"
@@ -27,7 +26,7 @@ dashboard_admin_user_ro: true
 grafana_admin_password: +xFRe+RES@7vg24n
 ceph_docker_registry: quay.io
 ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-main
+ceph_docker_image_tag: latest-reef
 node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
 prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
 alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
diff --git a/tests/functional/collocation/group_vars/all b/tests/functional/collocation/group_vars/all
index 423ffbac7c..8e4061e4d1 100644
--- a/tests/functional/collocation/group_vars/all
+++ b/tests/functional/collocation/group_vars/all
@@ -2,7 +2,6 @@
 containerized_deployment: False
 ceph_origin: repository
 ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.15.0/24"
diff --git a/tests/functional/docker2podman/group_vars/all b/tests/functional/docker2podman/group_vars/all
index 42e97ffd8e..966ab68540 100644
--- a/tests/functional/docker2podman/group_vars/all
+++ b/tests/functional/docker2podman/group_vars/all
@@ -5,7 +5,6 @@ docker: True
 container_binary: docker
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.58.0/24"
@@ -17,26 +16,14 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
-openstack_config: False
-openstack_glance_pool:
-  name: "images"
-  rule_name: "HDD"
-  size: 1
-openstack_cinder_pool:
-  name: "volumes"
-  rule_name: "HDD"
-  size: 1
-openstack_pools:
-  - "{{ openstack_glance_pool }}"
-  - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
grafana_admin_password: +xFRe+RES@7vg24n ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main +ceph_docker_image_tag: latest-reef node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" -grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" \ No newline at end of file +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" diff --git a/tests/functional/external_clients/container/inventory/group_vars/all b/tests/functional/external_clients/container/inventory/group_vars/all index 940a55ff86..beb09c357a 100644 --- a/tests/functional/external_clients/container/inventory/group_vars/all +++ b/tests/functional/external_clients/container/inventory/group_vars/all @@ -4,7 +4,6 @@ docker: True containerized_deployment: True -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_num_instances: 2 ceph_mon_docker_subnet: "{{ public_network }}" @@ -12,7 +11,6 @@ public_network: "192.168.31.0/24" cluster_network: "192.168.32.0/24" rgw_override_bucket_index_max_shards: 16 rgw_bucket_default_quota_max_objects: 1638400 -openstack_config: True dashboard_enabled: false ceph_conf_overrides: global: @@ -39,4 +37,4 @@ fsid: 40358a87-ab6e-4bdc-83db-1d909147861c generate_fsid: false ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef \ No newline at end of file diff --git a/tests/functional/external_clients/inventory/group_vars/all b/tests/functional/external_clients/inventory/group_vars/all index f941ecc7bc..29c0ed43ba 100644 --- a/tests/functional/external_clients/inventory/group_vars/all +++ b/tests/functional/external_clients/inventory/group_vars/all @@ -2,10 +2,8 @@ containerized_deployment: False ceph_origin: repository ceph_repository: community -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" ceph_mon_docker_subnet: "{{ public_network }}" -openstack_config: True dashboard_enabled: False public_network: "192.168.31.0/24" cluster_network: "192.168.32.0/24" diff --git a/tests/functional/lvm-auto-discovery/container/group_vars/all b/tests/functional/lvm-auto-discovery/container/group_vars/all index dab885f7a8..af658c0c12 100644 --- a/tests/functional/lvm-auto-discovery/container/group_vars/all +++ b/tests/functional/lvm-auto-discovery/container/group_vars/all @@ -6,11 +6,10 @@ docker: True containerized_deployment: True ceph_origin: repository -ceph_repository: dev +ceph_repository: community cluster: ceph public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" @@ -29,4 +28,4 @@ handler_health_mon_check_delay: 10 handler_health_osd_check_delay: 10 ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef \ No newline at end of file diff --git 
a/tests/functional/lvm-auto-discovery/group_vars/all b/tests/functional/lvm-auto-discovery/group_vars/all index 149cd228fd..157cb79c74 100644 --- a/tests/functional/lvm-auto-discovery/group_vars/all +++ b/tests/functional/lvm-auto-discovery/group_vars/all @@ -5,7 +5,6 @@ ceph_repository: community cluster: ceph public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" osd_objectstore: "bluestore" crush_device_class: test diff --git a/tests/functional/lvm-batch/container/group_vars/all b/tests/functional/lvm-batch/container/group_vars/all index a3e868dd31..292af63c54 100644 --- a/tests/functional/lvm-batch/container/group_vars/all +++ b/tests/functional/lvm-batch/container/group_vars/all @@ -10,7 +10,6 @@ ceph_repository: community cluster: ceph public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" crush_device_class: test copy_admin_key: true @@ -29,4 +28,4 @@ handler_health_mon_check_delay: 10 handler_health_osd_check_delay: 10 ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef \ No newline at end of file diff --git a/tests/functional/lvm-batch/group_vars/all b/tests/functional/lvm-batch/group_vars/all index 0c432635ca..22c46d1aa6 100644 --- a/tests/functional/lvm-batch/group_vars/all +++ b/tests/functional/lvm-batch/group_vars/all @@ -5,7 +5,6 @@ ceph_repository: community cluster: ceph public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" crush_device_class: test copy_admin_key: true diff --git a/tests/functional/lvm-osds/container/group_vars/all b/tests/functional/lvm-osds/container/group_vars/all index 5e812e3556..64286b499b 100644 --- a/tests/functional/lvm-osds/container/group_vars/all +++ b/tests/functional/lvm-osds/container/group_vars/all @@ -8,7 +8,6 @@ ceph_origin: repository ceph_repository: community public_network: "192.168.33.0/24" cluster_network: "192.168.34.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" copy_admin_key: true containerized_deployment: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb @@ -22,20 +21,6 @@ ceph_conf_overrides: dashboard_enabled: False handler_health_mon_check_delay: 10 handler_health_osd_check_delay: 10 -openstack_config: True -openstack_glance_pool: - name: "images" - type: 3 - size: 1 - application: rbd - target_size_ratio: 0.2 -openstack_cinder_pool: - name: "volumes" - size: 1 - application: rbd -openstack_pools: - - "{{ openstack_glance_pool }}" - - "{{ openstack_cinder_pool }}" ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef diff --git a/tests/functional/lvm-osds/group_vars/all b/tests/functional/lvm-osds/group_vars/all index 3ee80cc074..c39cfd433d 100644 --- a/tests/functional/lvm-osds/group_vars/all +++ b/tests/functional/lvm-osds/group_vars/all @@ 
-4,7 +4,6 @@ ceph_origin: repository ceph_repository: community public_network: "192.168.39.0/24" cluster_network: "192.168.40.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" copy_admin_key: true # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb os_tuning_params: @@ -17,17 +16,4 @@ ceph_conf_overrides: dashboard_enabled: False handler_health_mon_check_delay: 10 handler_health_osd_check_delay: 10 -openstack_config: True -openstack_glance_pool: - name: "images" - type: 3 - size: 1 - application: rbd - target_size_ratio: 0.2 -openstack_cinder_pool: - name: "volumes" - size: 1 - application: rbd -openstack_pools: - - "{{ openstack_glance_pool }}" - - "{{ openstack_cinder_pool }}" \ No newline at end of file + diff --git a/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all b/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all index 5ffdf22bde..657e3015fd 100644 --- a/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all +++ b/tests/functional/migrate_ceph_disk_to_ceph_volume/group_vars/all @@ -5,7 +5,6 @@ ceph_repository: community cluster: test public_network: "192.168.1.0/24" cluster_network: "192.168.2.0/24" -monitor_interface: eth1 journal_size: 100 osd_objectstore: "bluestore" devices: diff --git a/tests/functional/ooo-collocation/Vagrantfile b/tests/functional/ooo-collocation/Vagrantfile deleted file mode 120000 index 706a5bb470..0000000000 --- a/tests/functional/ooo-collocation/Vagrantfile +++ /dev/null @@ -1 +0,0 @@ -../../../Vagrantfile \ No newline at end of file diff --git a/tests/functional/ooo-collocation/ceph-override.json b/tests/functional/ooo-collocation/ceph-override.json deleted file mode 120000 index fe2ff40d62..0000000000 --- a/tests/functional/ooo-collocation/ceph-override.json +++ /dev/null @@ -1 +0,0 @@ -../all_daemons/ceph-override.json \ No newline at end of file diff --git a/tests/functional/ooo-collocation/hosts b/tests/functional/ooo-collocation/hosts deleted file mode 100644 index 812bd8c26a..0000000000 --- a/tests/functional/ooo-collocation/hosts +++ /dev/null @@ -1,86 +0,0 @@ -all: - vars: - admin_secret: AQBSV4xaAAAAABAA3VUTiOZTHecau2SnAEVPYQ== - ceph_conf_overrides: - global: {osd_pool_default_pg_num: 8, osd_pool_default_pgp_num: 8, osd_pool_default_size: 1, - mon_allow_pool_size_one: true, - mon_warn_on_pool_no_redundancy: false, - rgw_keystone_accepted_roles: 'Member, admin', rgw_keystone_admin_domain: default, - rgw_keystone_admin_password: RtYPg7AUdsZCGv4Z4rF8FvnaR, rgw_keystone_admin_project: service, - rgw_keystone_admin_user: swift, rgw_keystone_api_version: 3, rgw_keystone_implicit_tenants: 'true', - rgw_keystone_url: 'http://192.168.95.10:5000', rgw_s3_auth_use_keystone: 'true', rgw_keystone_revocation_interval: 0} - cluster: mycluster - ceph_docker_image: ceph/daemon - ceph_docker_image_tag: latest-main - ceph_docker_registry: quay.io - cephfs_data_pool: - name: 'manila_data' - application: "cephfs" - cephfs_metadata_pool: - name: 'manila_metadata' - application: "cephfs" - cephfs_pools: - - "{{ cephfs_data_pool }}" - - "{{ cephfs_metadata_pool }}" - cluster_network: 192.168.96.0/24 - containerized_deployment: true - devices: [/dev/sda, /dev/sdb, /dev/sdc] - docker: true - fsid: 6e008d48-1661-11e8-8546-008c3214218a - generate_fsid: false - ip_version: ipv4 - ireallymeanit: 'yes' - keys: - - {key: AQAN0RdbAAAAABAA3CpSKRVDrENjkOSunEFZ0A==, mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow r', name: client.openstack, osd_cap: "allow class-read 
object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=backups, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=metrics"} - - {key: AQAN0RdbAAAAABAAtV5Dq28z4H6XxwhaNEaFZg==, mds_cap: 'allow *', mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow r, allow command "auth del", allow command "auth caps", allow command "auth get", allow command "auth get-or-create"', name: client.manila, osd_cap: 'allow rw'} - - {key: AQAN0RdbAAAAABAAH5D3WgMN9Rxw3M8jkpMIfg==, mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow rw', name: client.radosgw, osd_cap: 'allow rwx'} - monitor_address_block: 192.168.95.0/24 - monitor_secret: AQBSV4xaAAAAABAALqm4vRHcITs4/041TwluMg== - ntp_service_enabled: false - openstack_config: true - openstack_keys: - - {key: AQAN0RdbAAAAABAA3CpSKRVDrENjkOSunEFZ0A==, mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow r', name: client.openstack, osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=backups, allow rwx pool=vms, allow rwx pool=images, allow rwx pool=metrics"} - - {key: AQAN0RdbAAAAABAAtV5Dq28z4H6XxwhaNEaFZg==, mds_cap: 'allow *', mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow r, allow command "auth del", allow command "auth caps", allow command "auth get", allow command "auth get-or-create"', name: client.manila, osd_cap: 'allow rw'} - - {key: AQAN0RdbAAAAABAAH5D3WgMN9Rxw3M8jkpMIfg==, mgr_cap: 'allow *', mode: '0600', mon_cap: 'allow rw', name: client.radosgw, osd_cap: 'allow rwx'} - openstack_pools: - - {name: images, pg_num: 8, rule_name: 'replicated_rule'} - - {name: metrics, pg_num: 8, rule_name: 'replicated_rule'} - - {name: backups, pg_num: 8, rule_name: 'replicated_rule'} - - {name: vms, pg_num: 8, rule_name: 'replicated_rule'} - - {name: volumes, pg_num: 8, rule_name: 'replicated_rule'} - pools: [] - public_network: 192.168.95.0/24 - radosgw_address_block: 192.168.95.0/24 - radosgw_civetweb_port: '8080' - radosgw_keystone_ssl: false - user_config: true - dashboard_enabled: false -clients: - hosts: - client0: {} - client1: {} - client2: {} -mdss: - hosts: - mon0: {} -mgrs: - hosts: - mon0: {} -mons: - hosts: - mon0: {} - mon1: {} - mon2: {} -nfss: - hosts: {} -osds: - hosts: - osd0: {} - osd1: {} - osd2: {} -rbdmirrors: - hosts: {} -rgws: - hosts: - mon0: {} - osd0: {} diff --git a/tests/functional/ooo-collocation/vagrant_variables.yml b/tests/functional/ooo-collocation/vagrant_variables.yml deleted file mode 100644 index 847591200d..0000000000 --- a/tests/functional/ooo-collocation/vagrant_variables.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- - -# DEPLOY CONTAINERIZED DAEMONS -docker: True - -# DEFINE THE NUMBER OF VMS TO RUN -mon_vms: 3 -osd_vms: 3 -mds_vms: 0 -rgw_vms: 0 -nfs_vms: 0 -grafana_server_vms: 0 -rbd_mirror_vms: 0 -client_vms: 3 -mgr_vms: 0 - -# SUBNETS TO USE FOR THE VMS -public_subnet: 192.168.95 -cluster_subnet: 192.168.96 - -# MEMORY -# set 1024 for CentOS -memory: 1024 - -# Disks -# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]" -# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]" -disks: "[ '/dev/sda', '/dev/sdb', '/dev/sdc' ]" - -# VAGRANT BOX -# Ceph boxes are *strongly* suggested. They are under better control and will -# not get updated frequently unless required for build systems. 
These are (for -# now): -# -# * ceph/ubuntu-xenial -# -# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 -# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet -# libvirt CentOS: centos/7 -# parallels Ubuntu: parallels/ubuntu-14.04 -# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' -# For more boxes have a look at: -# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= -# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ -vagrant_box: centos/atomic-host -# client_vagrant_box: centos/stream8 -#ssh_private_key_path: "~/.ssh/id_rsa" -# The sync directory changes based on vagrant box -# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant -#vagrant_sync_dir: /home/vagrant/sync -#vagrant_sync_dir: / -# Disables synced folder creation. Not needed for testing, will skip mounting -# the vagrant directory on the remote box regardless of the provider. -vagrant_disable_synced_folder: true -# VAGRANT URL -# This is a URL to download an image from an alternate location. vagrant_box -# above should be set to the filename of the image. -# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box -# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box -# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/tests/functional/podman/group_vars/all b/tests/functional/podman/group_vars/all index 6101343ecd..427f6bed53 100644 --- a/tests/functional/podman/group_vars/all +++ b/tests/functional/podman/group_vars/all @@ -4,7 +4,6 @@ docker: True containerized_deployment: True -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" ceph_mon_docker_subnet: "{{ public_network }}" public_network: "192.168.30.0/24" @@ -16,26 +15,14 @@ ceph_conf_overrides: mon_allow_pool_size_one: true mon_warn_on_pool_no_redundancy: false osd_pool_default_size: 1 -openstack_config: True -openstack_glance_pool: - name: "images" - rule_name: "HDD" - size: 1 -openstack_cinder_pool: - name: "volumes" - rule_name: "HDD" - size: 1 -openstack_pools: - - "{{ openstack_glance_pool }}" - - "{{ openstack_cinder_pool }}" handler_health_mon_check_delay: 10 handler_health_osd_check_delay: 10 dashboard_admin_password: $sX!cD$rYU6qR^B! 
grafana_admin_password: +xFRe+RES@7vg24n ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main +ceph_docker_image_tag: latest-reef node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" -grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" \ No newline at end of file +grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4" diff --git a/tests/functional/rbdmirror/container/group_vars/all b/tests/functional/rbdmirror/container/group_vars/all index 54924683cd..6df130bb36 100644 --- a/tests/functional/rbdmirror/container/group_vars/all +++ b/tests/functional/rbdmirror/container/group_vars/all @@ -6,7 +6,6 @@ ceph_repository: community cluster: ceph public_network: "192.168.144.0/24" cluster_network: "192.168.145.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" @@ -29,4 +28,4 @@ ceph_conf_overrides: dashboard_enabled: False ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main +ceph_docker_image_tag: latest-reef diff --git a/tests/functional/rbdmirror/container/secondary/group_vars/all b/tests/functional/rbdmirror/container/secondary/group_vars/all index ccc55d1cf8..06a8dc7e77 100644 --- a/tests/functional/rbdmirror/container/secondary/group_vars/all +++ b/tests/functional/rbdmirror/container/secondary/group_vars/all @@ -6,7 +6,6 @@ ceph_repository: community cluster: ceph public_network: "192.168.146.0/24" cluster_network: "192.168.147.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" @@ -29,4 +28,4 @@ ceph_conf_overrides: dashboard_enabled: False ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main +ceph_docker_image_tag: latest-reef diff --git a/tests/functional/rbdmirror/group_vars/all b/tests/functional/rbdmirror/group_vars/all index 93474c1303..ef706caa4d 100644 --- a/tests/functional/rbdmirror/group_vars/all +++ b/tests/functional/rbdmirror/group_vars/all @@ -4,7 +4,6 @@ ceph_repository: community cluster: ceph public_network: "192.168.140.0/24" cluster_network: "192.168.141.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" diff --git a/tests/functional/rbdmirror/secondary/group_vars/all b/tests/functional/rbdmirror/secondary/group_vars/all index 9c44e89b79..b5bf0eaee3 100644 --- a/tests/functional/rbdmirror/secondary/group_vars/all +++ b/tests/functional/rbdmirror/secondary/group_vars/all @@ -4,7 +4,6 @@ ceph_repository: community cluster: ceph public_network: "192.168.142.0/24" cluster_network: "192.168.143.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" journal_size: 100 osd_objectstore: "bluestore" diff --git a/tests/functional/shrink_mds/container/group_vars/all 
b/tests/functional/shrink_mds/container/group_vars/all index 43f2de24d9..97a6ea8f43 100644 --- a/tests/functional/shrink_mds/container/group_vars/all +++ b/tests/functional/shrink_mds/container/group_vars/all @@ -4,7 +4,6 @@ docker: True containerized_deployment: True -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" ceph_mon_docker_subnet: "{{ public_network }}" public_network: "192.168.79.0/24" cluster_network: "192.168.80.0/24" @@ -13,9 +12,8 @@ ceph_conf_overrides: mon_allow_pool_size_one: true mon_warn_on_pool_no_redundancy: false osd_pool_default_size: 1 -openstack_config: False dashboard_enabled: False copy_admin_key: True ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef \ No newline at end of file diff --git a/tests/functional/shrink_mds/group_vars/all b/tests/functional/shrink_mds/group_vars/all index 0ceff4cdbc..b82aa455a9 100644 --- a/tests/functional/shrink_mds/group_vars/all +++ b/tests/functional/shrink_mds/group_vars/all @@ -3,7 +3,6 @@ ceph_origin: repository ceph_repository: community public_network: "192.168.77.0/24" cluster_network: "192.168.78.0/24" -monitor_interface: eth1 radosgw_interface: eth1 journal_size: 100 osd_objectstore: "bluestore" diff --git a/tests/functional/shrink_mgr/container/group_vars/all b/tests/functional/shrink_mgr/container/group_vars/all index 5488b6c41a..367c38b70b 100644 --- a/tests/functional/shrink_mgr/container/group_vars/all +++ b/tests/functional/shrink_mgr/container/group_vars/all @@ -4,7 +4,6 @@ docker: True containerized_deployment: True -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" ceph_mon_docker_subnet: "{{ public_network }}" public_network: "192.168.83.0/24" cluster_network: "192.168.84.0/24" @@ -13,8 +12,7 @@ ceph_conf_overrides: mon_allow_pool_size_one: true mon_warn_on_pool_no_redundancy: false osd_pool_default_size: 1 -openstack_config: False dashboard_enabled: False ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef \ No newline at end of file diff --git a/tests/functional/shrink_mgr/group_vars/all b/tests/functional/shrink_mgr/group_vars/all index bc72091a19..dea6e26e6f 100644 --- a/tests/functional/shrink_mgr/group_vars/all +++ b/tests/functional/shrink_mgr/group_vars/all @@ -1,9 +1,8 @@ --- ceph_origin: repository -ceph_repository: dev +ceph_repository: community public_network: "192.168.81.0/24" cluster_network: "192.168.82.0/24" -monitor_interface: eth1 radosgw_interface: eth1 ceph_conf_overrides: global: diff --git a/tests/functional/shrink_mon/container/group_vars/all b/tests/functional/shrink_mon/container/group_vars/all index 2fd03e9b5e..2d75b6fa61 100644 --- a/tests/functional/shrink_mon/container/group_vars/all +++ b/tests/functional/shrink_mon/container/group_vars/all @@ -4,7 +4,6 @@ docker: True containerized_deployment: True -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" ceph_mon_docker_subnet: "{{ public_network }}" public_network: "192.168.17.0/24" cluster_network: "192.168.18.0/24" @@ -13,8 +12,7 @@ ceph_conf_overrides: mon_allow_pool_size_one: true mon_warn_on_pool_no_redundancy: false osd_pool_default_size: 1 -openstack_config: False dashboard_enabled: False ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: 
latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef \ No newline at end of file diff --git a/tests/functional/shrink_mon/hosts b/tests/functional/shrink_mon/hosts index c023021463..5d91b7dbd0 100644 --- a/tests/functional/shrink_mon/hosts +++ b/tests/functional/shrink_mon/hosts @@ -1,7 +1,7 @@ [mons] -mon0 monitor_address=192.168.1.10 -mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" -mon2 monitor_address=192.168.1.12 +mon0 +mon1 +mon2 [osds] osd0 \ No newline at end of file diff --git a/tests/functional/shrink_mon/hosts-switch-to-containers b/tests/functional/shrink_mon/hosts-switch-to-containers index b995e9b993..3ab72318ef 100644 --- a/tests/functional/shrink_mon/hosts-switch-to-containers +++ b/tests/functional/shrink_mon/hosts-switch-to-containers @@ -2,9 +2,9 @@ docker=True [mons] -mon0 monitor_address=192.168.1.10 -mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" -mon2 monitor_address=192.168.1.12 +mon0 +mon1 +mon2 [osds] osd0 diff --git a/tests/functional/shrink_osd/container/group_vars/all b/tests/functional/shrink_osd/container/group_vars/all index 9e6f481b84..5cc5f6e81f 100644 --- a/tests/functional/shrink_osd/container/group_vars/all +++ b/tests/functional/shrink_osd/container/group_vars/all @@ -4,7 +4,6 @@ docker: True containerized_deployment: True -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" ceph_mon_docker_subnet: "{{ public_network }}" public_network: "192.168.73.0/24" cluster_network: "192.168.74.0/24" @@ -13,9 +12,8 @@ ceph_conf_overrides: mon_allow_pool_size_one: true mon_warn_on_pool_no_redundancy: false osd_pool_default_size: 1 -openstack_config: False dashboard_enabled: False copy_admin_key: True ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef \ No newline at end of file diff --git a/tests/functional/shrink_osd/group_vars/all b/tests/functional/shrink_osd/group_vars/all index d6999b17ee..956f325f85 100644 --- a/tests/functional/shrink_osd/group_vars/all +++ b/tests/functional/shrink_osd/group_vars/all @@ -6,6 +6,5 @@ cluster_network: "192.168.72.0/24" ceph_conf_overrides: global: osd_pool_default_size: 3 -openstack_config: False dashboard_enabled: False copy_admin_key: True \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/container/group_vars/all b/tests/functional/shrink_rbdmirror/container/group_vars/all index 7eeffe2663..684d5b5c29 100644 --- a/tests/functional/shrink_rbdmirror/container/group_vars/all +++ b/tests/functional/shrink_rbdmirror/container/group_vars/all @@ -5,16 +5,14 @@ docker: True public_network: "192.168.87.0/24" cluster_network: "192.168.88.0/24" containerized_deployment: True -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" ceph_mon_docker_subnet: "{{ public_network }}" ceph_conf_overrides: global: mon_allow_pool_size_one: true mon_warn_on_pool_no_redundancy: false osd_pool_default_size: 1 -openstack_config: False dashboard_enabled: False copy_admin_key: True ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef \ No newline at end of file diff --git a/tests/functional/shrink_rbdmirror/group_vars/all b/tests/functional/shrink_rbdmirror/group_vars/all index 8e2437b48d..1a90c972b7 100644 --- 
a/tests/functional/shrink_rbdmirror/group_vars/all +++ b/tests/functional/shrink_rbdmirror/group_vars/all @@ -3,7 +3,6 @@ ceph_origin: repository ceph_repository: community public_network: "192.168.85.0/24" cluster_network: "192.168.86.0/24" -monitor_interface: eth1 osd_objectstore: "bluestore" copy_admin_key: true ceph_conf_overrides: diff --git a/tests/functional/shrink_rgw/container/group_vars/all b/tests/functional/shrink_rgw/container/group_vars/all index 052f9ffc9f..900211e8de 100644 --- a/tests/functional/shrink_rgw/container/group_vars/all +++ b/tests/functional/shrink_rgw/container/group_vars/all @@ -6,7 +6,6 @@ docker: True containerized_deployment: True public_network: "192.168.91.0/24" cluster_network: "192.168.92.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" ceph_mon_docker_subnet: "{{ public_network }}" ceph_conf_overrides: @@ -14,9 +13,8 @@ ceph_conf_overrides: mon_allow_pool_size_one: true mon_warn_on_pool_no_redundancy: false osd_pool_default_size: 1 -openstack_config: False dashboard_enabled: False copy_admin_key: True ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main \ No newline at end of file +ceph_docker_image_tag: latest-reef \ No newline at end of file diff --git a/tests/functional/shrink_rgw/group_vars/all b/tests/functional/shrink_rgw/group_vars/all index f67e921540..bc1e28695d 100644 --- a/tests/functional/shrink_rgw/group_vars/all +++ b/tests/functional/shrink_rgw/group_vars/all @@ -1,9 +1,8 @@ --- ceph_origin: repository -ceph_repository: dev +ceph_repository: community public_network: "192.168.89.0/24" cluster_network: "192.168.90.0/24" -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" osd_objectstore: "bluestore" copy_admin_key: true diff --git a/tests/functional/subset_update/container/group_vars/all b/tests/functional/subset_update/container/group_vars/all index 4b8941b96f..a9b38103af 100644 --- a/tests/functional/subset_update/container/group_vars/all +++ b/tests/functional/subset_update/container/group_vars/all @@ -4,7 +4,6 @@ docker: True containerized_deployment: True -monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" ceph_mon_docker_subnet: "{{ public_network }}" public_network: "192.168.5.0/24" @@ -18,7 +17,6 @@ ceph_conf_overrides: mon_warn_on_pool_no_redundancy: false osd_pool_default_size: 1 mon_max_pg_per_osd: 300 -openstack_config: false docker_pull_timeout: 600s handler_health_mon_check_delay: 10 handler_health_osd_check_delay: 10 @@ -29,7 +27,7 @@ dashboard_admin_password: $sX!cD$rYU6qR^B! 
grafana_admin_password: +xFRe+RES@7vg24n ceph_docker_registry: quay.io ceph_docker_image: ceph/daemon-base -ceph_docker_image_tag: latest-main +ceph_docker_image_tag: latest-reef node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0" prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2" alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2" diff --git a/tests/functional/subset_update/container/hosts b/tests/functional/subset_update/container/hosts index e7d4fa615c..8823f1a85b 100644 --- a/tests/functional/subset_update/container/hosts +++ b/tests/functional/subset_update/container/hosts @@ -1,7 +1,7 @@ [mons] -mon0 monitor_address=192.168.5.10 -mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" -mon2 monitor_address=192.168.5.12 +mon0 +mon1 +mon2 [mgrs] mon0 diff --git a/tests/functional/subset_update/group_vars/all b/tests/functional/subset_update/group_vars/all index 03d44a831f..4161ddea36 100644 --- a/tests/functional/subset_update/group_vars/all +++ b/tests/functional/subset_update/group_vars/all @@ -1,6 +1,6 @@ --- ceph_origin: repository -ceph_repository: dev +ceph_repository: community public_network: "192.168.3.0/24" cluster_network: "192.168.4.0/24" radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" @@ -11,7 +11,6 @@ ceph_conf_overrides: mon_warn_on_pool_no_redundancy: false osd_pool_default_size: 1 mon_max_pg_per_osd: 300 -openstack_config: false handler_health_mon_check_delay: 10 handler_health_osd_check_delay: 10 mds_max_mds: 2 diff --git a/tests/functional/subset_update/hosts b/tests/functional/subset_update/hosts index 18669ec1db..ce63629468 100644 --- a/tests/functional/subset_update/hosts +++ b/tests/functional/subset_update/hosts @@ -1,7 +1,7 @@ [mons] -mon0 monitor_address=192.168.3.10 -mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}" -mon2 monitor_address=192.168.3.12 +mon0 +mon1 +mon2 [mgrs] mon0 diff --git a/tests/library/test_cephadm_bootstrap.py b/tests/library/test_cephadm_bootstrap.py index 9a905d7801..55d0fda42b 100644 --- a/tests/library/test_cephadm_bootstrap.py +++ b/tests/library/test_cephadm_bootstrap.py @@ -4,7 +4,7 @@ import cephadm_bootstrap fake_fsid = '0f1e0605-db0b-485c-b366-bd8abaa83f3b' -fake_image = 'quay.io/ceph/daemon-base:latest-main-devel' +fake_image = 'quay.io/ceph/daemon-base:latest-reef-devel' fake_ip = '192.168.42.1' fake_registry = 'quay.io' fake_registry_user = 'foo' diff --git a/tests/scripts/vagrant_up.sh b/tests/scripts/vagrant_up.sh index 6a87f408a2..05b17582f4 100644 --- a/tests/scripts/vagrant_up.sh +++ b/tests/scripts/vagrant_up.sh @@ -1,4 +1,12 @@ #!/bin/bash +set -x +if [[ -n $1 ]]; then + DIRECTORY=$1 + shift +else + DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +fi +pushd "${DIRECTORY}" if [[ "${CEPH_ANSIBLE_VAGRANT_BOX}" =~ "centos/stream" ]]; then EL_VERSION="${CEPH_ANSIBLE_VAGRANT_BOX: -1}" @@ -16,4 +24,5 @@ do sleep 5 done -sleep 10 \ No newline at end of file +sleep 10 +popd diff --git a/tox-cephadm.ini b/tox-cephadm.ini index 582d035f11..f84357d1dc 100644 --- a/tox-cephadm.ini +++ b/tox-cephadm.ini @@ -29,7 +29,7 @@ changedir= {toxinidir}/tests/functional/cephadm commands= ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/vagrant_up.sh 
{changedir} --no-provision {posargs:--provider=virtualbox} bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/cephadm.yml --extra-vars "\ diff --git a/tox-docker2podman.ini b/tox-docker2podman.ini index 7774b13249..33eb779f64 100644 --- a/tox-docker2podman.ini +++ b/tox-docker2podman.ini @@ -29,7 +29,7 @@ changedir= {toxinidir}/tests/functional/docker2podman commands= ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} # configure lvm diff --git a/tox-external_clients.ini b/tox-external_clients.ini index bf33c3edb3..b03f30211c 100644 --- a/tox-external_clients.ini +++ b/tox-external_clients.ini @@ -32,7 +32,7 @@ deps= -r{toxinidir}/tests/requirements.txt changedir={toxinidir}/tests/functional/external_clients{env:CONTAINER_DIR:} commands= ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} ansible-playbook -vv --diff -i {changedir}/inventory {toxinidir}/tests/functional/setup.yml diff --git a/tox-podman.ini b/tox-podman.ini index c458cf6d4e..6dfd6f9209 100644 --- a/tox-podman.ini +++ b/tox-podman.ini @@ -33,7 +33,7 @@ changedir= {toxinidir}/tests/functional/podman commands= ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} # configure lvm diff --git a/tox-rbdmirror.ini b/tox-rbdmirror.ini index 38bc4dffee..b1f70fddce 100644 --- a/tox-rbdmirror.ini +++ b/tox-rbdmirror.ini @@ -32,7 +32,7 @@ setenv= container: CEPH_RBD_MIRROR_REMOTE_MON_HOSTS = 192.168.144.10 non_container: CEPH_RBD_MIRROR_REMOTE_MON_HOSTS = 192.168.140.10 - UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-main + UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-reef UPDATE_CEPH_DEV_BRANCH = main UPDATE_CEPH_DEV_SHA1 = latest ROLLING_UPDATE = True @@ -40,7 +40,7 @@ deps= -r{toxinidir}/tests/requirements.txt changedir={toxinidir}/tests/functional/rbdmirror{env:CONTAINER_DIR:} commands= ansible-galaxy install -r {toxinidir}/requirements.yml -v - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" @@ -63,7 +63,7 @@ commands= ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ " - bash -c "cd {changedir}/secondary && bash 
{toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox}" + bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir}/secondary --no-provision {posargs:--provider=virtualbox}" bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary" ansible-playbook --ssh-common-args='-F {changedir}/secondary/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/setup.yml ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir}/secondary ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" diff --git a/tox-shrink_osd.ini b/tox-shrink_osd.ini index 077eb3e53c..5b3cba56b0 100644 --- a/tox-shrink_osd.ini +++ b/tox-shrink_osd.ini @@ -70,9 +70,9 @@ setenv= container: PURGE_PLAYBOOK = purge-container-cluster.yml non_container: PLAYBOOK = site.yml.sample - CEPH_DOCKER_IMAGE_TAG = latest-main + CEPH_DOCKER_IMAGE_TAG = latest-reef CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-main - UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-main + UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-reef deps= -r{toxinidir}/tests/requirements.txt changedir= @@ -84,7 +84,7 @@ commands= ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} # configure lvm diff --git a/tox-subset_update.ini b/tox-subset_update.ini index c7c53e2f12..82081a98d4 100644 --- a/tox-subset_update.ini +++ b/tox-subset_update.ini @@ -29,7 +29,7 @@ setenv= container: PLAYBOOK = site-container.yml.sample non_container: PLAYBOOK = site.yml.sample - UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-main + UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-reef UPDATE_CEPH_DEV_BRANCH = main UPDATE_CEPH_DEV_SHA1 = latest ROLLING_UPDATE = True @@ -37,17 +37,14 @@ deps= -r{toxinidir}/tests/requirements.txt changedir={toxinidir}/tests/functional/subset_update{env:CONTAINER_DIR:} commands= ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml - non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ 
yes_i_know=true \ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -57,8 +54,6 @@ commands= # mon1 ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit mon1 --tags=mons --extra-vars "\ ireallymeanit=yes \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -66,8 +61,6 @@ commands= # mon0 and mon2 ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit 'mons:!mon1' --tags=mons --extra-vars "\ ireallymeanit=yes \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -75,8 +68,6 @@ commands= # upgrade mgrs ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=mgrs --extra-vars "\ ireallymeanit=yes \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -84,8 +75,6 @@ commands= # upgrade osd1 ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit=osd1 --tags=osds --extra-vars "\ ireallymeanit=yes \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -93,8 +82,6 @@ commands= # upgrade remaining osds (serially) ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --limit='osds:!osd1' --tags=osds --extra-vars "\ ireallymeanit=yes \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -102,8 +89,6 @@ commands= # upgrade rgws ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=rgws --extra-vars "\ ireallymeanit=yes \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -111,8 +96,6 @@ commands= # post upgrade actions ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --tags=post_upgrade --extra-vars "\ ireallymeanit=yes \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ 
ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ diff --git a/tox-update.ini b/tox-update.ini index 5911bf81f6..5b58c4b066 100644 --- a/tox-update.ini +++ b/tox-update.ini @@ -11,7 +11,7 @@ allowlist_externals = pip passenv=* setenv= - ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_SSH_ARGS = -F {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections ANSIBLE_CONFIG = {toxinidir}/ansible.cfg ANSIBLE_CALLBACK_ENABLED = profile_tasks @@ -21,62 +21,61 @@ setenv= ANSIBLE_STDOUT_CALLBACK = yaml # non_container: DEV_SETUP = True # Set the vagrant box image to use - centos-non_container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 - centos-container: CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9 + CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8 INVENTORY = {env:_INVENTORY:hosts} container: CONTAINER_DIR = /container container: PLAYBOOK = site-container.yml.sample non_container: PLAYBOOK = site.yml.sample - UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-main + UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-reef UPDATE_CEPH_DEV_BRANCH = main UPDATE_CEPH_DEV_SHA1 = latest ROLLING_UPDATE = True deps= -r{toxinidir}/tests/requirements.txt changedir={toxinidir}/tests/functional/all_daemons{env:CONTAINER_DIR:} commands= - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} - bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} + # use the stable-7.0 branch to deploy a quincy cluster + git clone -b stable-7.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible - ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + bash {envdir}/tmp/ceph-ansible/tests/scripts/vagrant_up.sh {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:} --no-provision {posargs:--provider=virtualbox} + bash {envdir}/tmp/ceph-ansible/tests/scripts/generate_ssh_config.sh {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:} + + ansible-galaxy collection install -r {envdir}/tmp/ceph-ansible/requirements.yml -v -p {envdir}/ansible_collections - ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/setup.yml + ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml -# # use the stable-7.0 branch to deploy an octopus cluster -# git clone -b stable-7.0 --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible -# pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt -# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml' -# # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file) -# bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm"' + pip install -r
{envdir}/tmp/ceph-ansible/tests/requirements.txt + bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml' + # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file) + bash -c 'ANSIBLE_CONFIG={envdir}/tmp/ceph-ansible/ansible.cfg ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml --extra-vars "osd_scenario=lvm" --limit 'osds:!osd2'' # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file) ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/lvm_setup.yml --limit 'osds:!osd2' - non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup=True change_dir={changedir} ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" - ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ + ansible-playbook -vv --diff -i {envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts {envdir}/tmp/ceph-ansible/{env:PLAYBOOK:site.yml.sample} --limit !nfs0 --extra-vars "\ yes_i_know=true \ delegate_facts_host={env:DELEGATE_FACTS_HOST:True} \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ + container_package_name=podman \ + container_service_name=podman \ + container_binary=podman \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ " -# pip uninstall -y ansible -# pip install -r {toxinidir}/tests/requirements.txt -# ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections + pip uninstall -y ansible + pip install -r {toxinidir}/tests/requirements.txt + ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/rolling_update.yml --extra-vars "\ ireallymeanit=yes \ - ceph_dev_branch={env:UPDATE_CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:UPDATE_CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ " - bash -c "CEPH_STABLE_RELEASE=reef py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests" + bash -c "CEPH_STABLE_RELEASE=reef py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts --ssh-config={envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/vagrant_ssh_config {toxinidir}/tests/functional/tests" +# bash -c "CEPH_STABLE_RELEASE=reef py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/hosts 
--ssh-config={envdir}/tmp/ceph-ansible/tests/functional/all_daemons{env:CONTAINER_DIR:}/vagrant_ssh_config {toxinidir}/tests/functional/tests" vagrant destroy --force diff --git a/tox.ini b/tox.ini index 08bfeb18b7..1aa3fda70d 100644 --- a/tox.ini +++ b/tox.ini @@ -26,7 +26,7 @@ setenv= deps= -r{toxinidir}/tests/requirements.txt changedir={toxinidir}/tests/functional/infra_lv_create commands= - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/lv-create.yml @@ -46,7 +46,7 @@ commands= ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/tests/functional/rbd_map_devices.yml --extra-vars "\ ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \ ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon-base} \ - ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-main} \ + ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-reef} \ " ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/{env:PURGE_PLAYBOOK:purge-cluster.yml} --extra-vars "\ @@ -54,7 +54,7 @@ commands= remove_packages=yes \ ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \ ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon-base} \ - ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-main} \ + ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-reef} \ " # re-setup lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file) @@ -63,8 +63,6 @@ commands= # set up the cluster again ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\ yes_i_know=true \ - ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -78,14 +76,12 @@ commands= ireallymeanit=yes \ ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:quay.io} \ ceph_docker_image={env:CEPH_DOCKER_IMAGE:ceph/daemon-base} \ - ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-main} \ + ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-reef} \ " # set up the cluster again ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars @ceph-override.json --extra-vars "\ yes_i_know=true \ - ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -108,8 +104,6 @@ commands= # set up the cluster again ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ yes_i_know=true \ - ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ " # test that the cluster can be redeployed in a healthy state py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests @@ -159,7 +153,7 @@ commands= commands= 
ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml --extra-vars "\ ireallymeanit=yes \ - ceph_docker_image_tag=latest-main-devel \ + ceph_docker_image_tag=latest-reef-devel \ ceph_docker_registry=quay.io \ ceph_docker_image=ceph/daemon-base \ ceph_docker_registry_auth=True \ @@ -178,8 +172,6 @@ commands= ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mon1 {toxinidir}/tests/functional/setup.yml ansible-playbook -vv --diff -i {changedir}/hosts-2 {toxinidir}/infrastructure-playbooks/add-mon.yml --extra-vars "\ ireallymeanit=yes \ - ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ " py.test --reruns 5 --reruns-delay 1 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts-2 --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests @@ -189,8 +181,6 @@ commands= ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mgrs {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ yes_i_know=true \ ireallymeanit=yes \ - ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -203,8 +193,6 @@ commands= ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit mdss {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ yes_i_know=true \ ireallymeanit=yes \ - ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -217,8 +205,6 @@ commands= ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rbdmirrors {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ yes_i_know=true \ ireallymeanit=yes \ - ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -231,8 +217,6 @@ commands= ansible-playbook -vv --diff -i {changedir}/hosts-2 --limit rgws {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ yes_i_know=true \ ireallymeanit=yes \ - ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -242,7 +226,7 @@ commands= [storage-inventory] commands= ansible-playbook -vv --diff -i {changedir}/hosts {toxinidir}/infrastructure-playbooks/storage-inventory.yml --extra-vars "\ - ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-main} \ + ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG:latest-reef} \ " [cephadm-adopt] @@ -292,11 +276,11 @@ setenv= shrink_rbdmirror: RBDMIRROR_TO_KILL = rbd-mirror0 shrink_rgw: RGW_TO_KILL = rgw0.rgw0 - CEPH_DOCKER_IMAGE_TAG = latest-main - CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-main - UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-main + CEPH_DOCKER_IMAGE_TAG = latest-reef + CEPH_DOCKER_IMAGE_TAG_BIS = latest-bis-reef + UPDATE_CEPH_DOCKER_IMAGE_TAG = latest-reef - switch_to_containers: CEPH_DOCKER_IMAGE_TAG = latest-main-devel + switch_to_containers: CEPH_DOCKER_IMAGE_TAG = latest-reef-devel deps= 
-r{toxinidir}/tests/requirements.txt changedir= @@ -328,9 +312,8 @@ changedir= commands= ansible-galaxy collection install -r {toxinidir}/requirements.yml -v -p {envdir}/ansible_collections - non_container: ansible-playbook -vv --diff -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup" - bash {toxinidir}/tests/scripts/vagrant_up.sh --no-provision {posargs:--provider=virtualbox} + bash {toxinidir}/tests/scripts/vagrant_up.sh {changedir} --no-provision {posargs:--provider=virtualbox} bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir} # configure lvm, we exclude osd2 given this node uses lvm batch scenario (see corresponding inventory host file) @@ -343,8 +326,6 @@ commands= ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\ no_log_on_ceph_key_tasks=false \ yes_i_know=true \ - ceph_dev_branch={env:CEPH_DEV_BRANCH:main} \ - ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} \ ceph_docker_registry_auth=True \ ceph_docker_registry_username={env:DOCKER_HUB_USERNAME} \ ceph_docker_registry_password={env:DOCKER_HUB_PASSWORD} \ @@ -360,7 +341,7 @@ commands= all_daemons,all_daemons_ipv6,collocation: py.test --reruns 20 --reruns-delay 3 -n 8 --durations=0 --sudo -v --connection=ansible --ansible-inventory={changedir}/{env:INVENTORY} --ssh-config={changedir}/vagrant_ssh_config {toxinidir}/tests/functional/tests # handlers/idempotency test - all_daemons,all_daemon_ipv6,all_in_one,collocation: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "no_log_on_ceph_key_tasks=false delegate_facts_host={env:DELEGATE_FACTS_HOST:True} ceph_docker_image_tag={env:CEPH_DOCKER_IMAGE_TAG_BIS:latest-bis-main} ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} yes_i_know=true" --extra-vars @ceph-override.json + all_daemons,all_daemons_ipv6,all_in_one,collocation: ansible-playbook -vv --diff -i {changedir}/{env:INVENTORY} {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "no_log_on_ceph_key_tasks=false delegate_facts_host={env:DELEGATE_FACTS_HOST:True} yes_i_know=true" --extra-vars @ceph-override.json purge: {[purge]commands} purge_dashboard: {[purge-dashboard]commands}
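
Note on the reworked tests/scripts/vagrant_up.sh: the script now accepts an optional scenario directory as its first argument, pushd's into it before bringing the VMs up (and popd's at the end), and shifts the directory off so the remaining flags keep their old positions for the retry loop; with no arguments at all it falls back to the script's own location. A minimal sketch of the new calling convention, using one of the real scenario paths from this diff (tox substitutes {changedir} for it):

    # Scenario directory first; the remaining flags are what used to be the
    # entire argument list and are handled as before.
    bash tests/scripts/vagrant_up.sh tests/functional/podman --no-provision --provider=virtualbox

Because a leading flag would now be consumed as the directory argument, callers have to be updated together with the script, which is why every tox file touched here inserts {changedir} (or an explicit path) ahead of --no-provision.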
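
A companion sketch of the inventory convention the scenario edits converge on: with monitor_interface and monitor_address dropped from hosts files and group_vars, the [mons] group lists bare hostnames and monitor addressing is derived from public_network. The skeleton below is hypothetical (an "example" scenario that does not exist in the tree), written as shell purely for illustration; the variable values mirror the subset_update scenario above:

    # Hypothetical scenario skeleton showing the post-change layout.
    mkdir -p tests/functional/example/group_vars

    # Bare monitor entries: no per-host monitor_address or monitor_interface.
    cat > tests/functional/example/hosts <<'EOF'
    [mons]
    mon0
    mon1
    mon2
    EOF

    # Addressing is resolved from public_network alone.
    cat > tests/functional/example/group_vars/all <<'EOF'
    ---
    ceph_origin: repository
    ceph_repository: community
    public_network: "192.168.3.0/24"
    cluster_network: "192.168.4.0/24"
    EOF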