stable 8.0 #7481

Merged · 17 commits · Mar 15, 2024
8 changes: 2 additions & 6 deletions Vagrantfile
@@ -25,8 +25,8 @@ GRAFANA = settings['grafana_server_vms']
NRBD_MIRRORS = settings['rbd_mirror_vms']
CLIENTS = settings['client_vms']
MGRS = settings['mgr_vms']
PUBLIC_SUBNET = settings['public_subnet']
CLUSTER_SUBNET = settings['cluster_subnet']
PUBLIC_SUBNET = ENV['CEPH_PUBLIC_SUBNET'] || settings['public_subnet']
CLUSTER_SUBNET = ENV['CEPH_CLUSTER_SUBNET'] || settings['cluster_subnet']
BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['vagrant_box']
CLIENT_BOX = ENV['CEPH_ANSIBLE_VAGRANT_BOX'] || settings['client_vagrant_box'] || BOX
BOX_URL = ENV['CEPH_ANSIBLE_VAGRANT_BOX_URL'] || settings['vagrant_box_url']
@@ -88,7 +88,6 @@ ansible_provision = proc do |ansible|
if DOCKER then
ansible.extra_vars = ansible.extra_vars.merge({
containerized_deployment: 'true',
monitor_interface: ETH,
ceph_mon_docker_subnet: ansible.extra_vars[:public_network],
devices: settings['disks'],
radosgw_interface: ETH,
@@ -97,16 +96,13 @@
else
ansible.extra_vars = ansible.extra_vars.merge({
devices: settings['disks'],
monitor_interface: ETH,
radosgw_interface: ETH,
os_tuning_params: settings['os_tuning_params'],
})
end

if BOX == 'linode' then
ansible.sudo = true
# Use monitor_address_block instead of monitor_interface:
ansible.extra_vars.delete(:monitor_interface)
# Use radosgw_address_block instead of radosgw_interface:
ansible.extra_vars.delete(:radosgw_interface)
ansible.extra_vars = ansible.extra_vars.merge({
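
The subnets defined in vagrant_variables.yml now act only as fallbacks; when set, the new CEPH_PUBLIC_SUBNET and CEPH_CLUSTER_SUBNET environment variables take precedence, mirroring the existing CEPH_ANSIBLE_VAGRANT_BOX override. A minimal sketch of the corresponding settings (values are illustrative, not taken from this PR):

    # vagrant_variables.yml (sketch) -- consulted only when the matching
    # CEPH_PUBLIC_SUBNET / CEPH_CLUSTER_SUBNET environment variables are unset
    public_subnet: "192.168.3"     # hypothetical default
    cluster_subnet: "192.168.4"    # hypothetical default
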
2 changes: 0 additions & 2 deletions docs/source/index.rst
@@ -228,7 +228,6 @@ An example configuration that deploys the upstream ``octopus`` version of Ceph w
ceph_repository: community
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
monitor_interface: eth1
devices:
- '/dev/sda'
- '/dev/sdb'
@@ -238,7 +237,6 @@ selection or other aspects of your cluster.

- ``ceph_origin``
- ``public_network``
- ``monitor_interface`` or ``monitor_address``


When deploying RGW instance(s) you are required to set the ``radosgw_interface`` or ``radosgw_address`` config option.
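
For example, a minimal sketch of that option in ``group_vars/all.yml`` (interface name and address are illustrative):

    radosgw_interface: eth1
    # or, alternatively, bind to an explicit address:
    # radosgw_address: 192.168.3.11
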
8 changes: 4 additions & 4 deletions docs/source/testing/scenarios.rst
@@ -130,15 +130,15 @@ way so that a vagrant environment can be isolated to the given scenario.
The ``hosts`` file should contain the hosts needed for the scenario. This might
seem a bit repetitive since machines are already defined in
:ref:`vagrant_variables` but it allows granular changes to hosts (for example
defining an interface vs. an IP on a monitor) which can help catch issues in
defining different public_network values between monitors) which can help catch issues in
``ceph-ansible`` configuration. For example:

.. code-block:: ini

[mons]
mon0 monitor_address=192.168.5.10
mon1 monitor_address=192.168.5.11
mon2 monitor_interface=eth1
mon0 public_network=192.168.1.0/24
mon1 public_network=192.168.2.0/24
mon2 public_network=192.168.3.0/24

.. _group_vars:

68 changes: 1 addition & 67 deletions group_vars/all.yml.sample
@@ -311,14 +311,6 @@ dummy:
#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor

## Monitor options
#
# You must define either monitor_interface, monitor_address or monitor_address_block.
# These variables must be defined at least in all.yml and overrided if needed (inventory host file or group_vars/*.yml).
# Eg. If you want to specify for each monitor which address the monitor will bind to you can set it in your **inventory host file** by using 'monitor_address' variable.
# Preference will go to monitor_address if both monitor_address and monitor_interface are defined.
#monitor_interface: interface
#monitor_address: x.x.x.x
#monitor_address_block: subnet
# set to either ipv4 or ipv6, whichever your network is using
#ip_version: ipv4

@@ -519,7 +511,7 @@ dummy:
# DOCKER #
##########
#ceph_docker_image: "ceph/daemon-base"
#ceph_docker_image_tag: latest-main
#ceph_docker_image_tag: latest-reef
#ceph_docker_registry: quay.io
#ceph_docker_registry_auth: false
# ceph_docker_registry_username:
@@ -562,64 +554,6 @@ dummy:
#docker_pull_timeout: "300s"


#############
# OPENSTACK #
#############
#openstack_config: false
# When pg_autoscale_mode is set to True, you must add the target_size_ratio key with a correct value
# `pg_num` and `pgp_num` keys will be ignored, even if specified.
# eg:
# openstack_glance_pool:
# name: "images"
# rule_name: "my_replicated_rule"
# application: "rbd"
# pg_autoscale_mode: false
# pg_num: 16
# pgp_num: 16
# target_size_ratio: 0.2
#openstack_glance_pool:
# name: "images"
# application: "rbd"
#openstack_cinder_pool:
# name: "volumes"
# application: "rbd"
#openstack_nova_pool:
# name: "vms"
# application: "rbd"
#openstack_cinder_backup_pool:
# name: "backups"
# application: "rbd"
#openstack_gnocchi_pool:
# name: "metrics"
# application: "rbd"
#openstack_cephfs_data_pool:
# name: "manila_data"
# application: "cephfs"
#openstack_cephfs_metadata_pool:
# name: "manila_metadata"
# application: "cephfs"
#openstack_pools:
# - "{{ openstack_glance_pool }}"
# - "{{ openstack_cinder_pool }}"
# - "{{ openstack_nova_pool }}"
# - "{{ openstack_cinder_backup_pool }}"
# - "{{ openstack_gnocchi_pool }}"
# - "{{ openstack_cephfs_data_pool }}"
# - "{{ openstack_cephfs_metadata_pool }}"


# The value for 'key' can be a pre-generated key,
# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
# By default, keys will be auto-generated.
#
#openstack_keys:
# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }


#############
# DASHBOARD #
#############
3 changes: 3 additions & 0 deletions group_vars/exporters.yml.sample
@@ -14,6 +14,7 @@ dummy:
#ceph_exporter_addr: "0.0.0.0"
#ceph_exporter_port: 9926
#ceph_exporter_stats_period: 5 # seconds
#ceph_exporter_prio_limit: 5

##########
# DOCKER #
@@ -23,8 +24,10 @@
#ceph_exporter_container_params:
# args:
# - -f
# - -n=client.ceph-exporter
# - --sock-dir=/var/run/ceph
# - --addrs={{ ceph_exporter_addr }}
# - --port={{ ceph_exporter_port }}
# - --stats-period={{ ceph_exporter_stats_period }}
# - --prio-limit={{ ceph_exporter_prio_limit }}

4 changes: 2 additions & 2 deletions group_vars/mons.yml.sample
@@ -62,8 +62,8 @@ dummy:
# - --default-mon-cluster-log-to-stderr=true
# - -i={{ monitor_name }}
# - --mon-data=/var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
# - --public-addr={{ _current_monitor_address }}
# - --mon-initial-members={{ groups[mon_group_name] | join(',') }}
# - --public-addr={{ _monitor_addresses[inventory_hostname] }}
# - --mon-initial-members={{ groups[mon_group_name] | map('extract', hostvars, 'ansible_facts') | map(attribute='hostname') | join(',') }}


###########
11 changes: 1 addition & 10 deletions infrastructure-playbooks/cephadm.yml
@@ -30,15 +30,6 @@
run_once: true
when: groups[mgr_group_name] is undefined or groups[mgr_group_name] | length == 0

- name: Validate monitor network configuration
ansible.builtin.fail:
msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided"
when:
- mon_group_name in group_names
- monitor_address == 'x.x.x.x'
- monitor_address_block == 'subnet'
- monitor_interface == 'interface'

- name: Validate dashboard configuration
when: dashboard_enabled | bool
run_once: true
@@ -158,7 +149,7 @@

- name: Bootstrap the new cluster
cephadm_bootstrap:
mon_ip: "{{ _current_monitor_address }}"
mon_ip: "{{ _monitor_addresses[inventory_hostname] }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
docker: "{{ true if container_binary == 'docker' else false }}"
pull: false
52 changes: 37 additions & 15 deletions infrastructure-playbooks/rolling_update.yml
@@ -24,14 +24,6 @@
private: false

tasks:
- name: Warn user regarding current status of stable-8.0
ansible.builtin.fail:
msg: >
"stable-8.0 branch isn't ready.
This is most likely going to break your deployment.
Set `i_have_understood_that_stable8_is_not_ready=yes if you know what you are doing.`"
when: i_have_understood_that_stable8_is_not_ready != 'yes'

- name: Exit playbook, if user did not mean to upgrade cluster
ansible.builtin.fail:
msg: >
@@ -143,12 +135,40 @@
register: ceph_version
changed_when: false

- name: check ceph release being deployed
- name: Check ceph release being deployed
ansible.builtin.fail:
msg: "This version of ceph-ansible is intended for upgrading to Ceph Reef only."
when: "'reef' not in ceph_version.stdout.split()"


- name: Ensure cluster config is applied
hosts: mons[0]
become: true
gather_facts: false
any_errors_fatal: true
tasks:
- name: Import default role
ansible.builtin.import_role:
name: ceph-defaults

- name: Import ceph-facts role
ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml

- name: Set cluster configs
ceph_config:
action: set
who: "{{ item.0.key }}"
option: "{{ item.1.key }}"
value: "{{ item.1.value }}"
when: item.1.value != omit
loop: "{{ ceph_cluster_conf | dict2dict }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
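
The loop above expects ceph_cluster_conf to be a nested mapping of config section ("who") to option/value pairs, which the dict2dict filter expands into one ceph_config call per option. A hypothetical example of such a variable (not part of this diff; the section and option names are assumptions):

    ceph_cluster_conf:
      global:
        public_network: 192.168.3.0/24
        cluster_network: 192.168.4.0/24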


- name: Upgrade ceph mon cluster
tags: mons
vars:
@@ -312,7 +332,7 @@
delegate_facts: true

- name: Non container | waiting for the monitor to join the quorum...
ansible.builtin.command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
ansible.builtin.command: ceph --cluster "{{ cluster }}" -m "{{ _monitor_addresses[groups['mons'][0]] }}" quorum_status --format json
register: ceph_health_raw
until:
- ceph_health_raw.rc == 0
@@ -325,7 +345,7 @@

- name: Container | waiting for the containerized monitor to join the quorum...
ansible.builtin.command: >
{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ _monitor_addresses[groups['mons'][0]] }}" quorum_status --format json
register: ceph_health_raw
until:
- ceph_health_raw.rc == 0
@@ -1169,17 +1189,19 @@
ansible.builtin.meta: end_play
when: not containerized_deployment | bool

- name: Stop the ceph-exporter service
- name: Stop the ceph-exporter service # noqa: ignore-errors
ansible.builtin.systemd:
name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}"
state: stopped
ignore_errors: true

# it needs to be done in a separate task otherwise the stop just before doesn't work.
- name: Mask and disable the ceph-exporter service
- name: Mask and disable the ceph-exporter service # noqa: ignore-errors
ansible.builtin.systemd:
name: "{{ 'ceph-exporter@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-exporter.service' }}"
enabled: false
masked: true
ignore_errors: true

- name: Import ceph-defaults role
ansible.builtin.import_role:
@@ -1210,7 +1232,7 @@
name: ceph-facts
tasks_from: container_binary.yml

- name: container | disallow pre-reef OSDs and enable all new reef-only functionality
- name: Container | disallow pre-reef OSDs and enable all new reef-only functionality
ansible.builtin.command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release reef"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
@@ -1219,7 +1241,7 @@
- containerized_deployment | bool
- groups.get(mon_group_name, []) | length > 0

- name: non container | disallow pre-reef OSDs and enable all new reef-only functionality
- name: Non container | disallow pre-reef OSDs and enable all new reef-only functionality
ansible.builtin.command: "ceph --cluster {{ cluster }} osd require-osd-release reef"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
4 changes: 2 additions & 2 deletions library/cephadm_adopt.py
@@ -84,7 +84,7 @@
cephadm_adopt:
name: mon.foo
style: legacy
image: quay.io/ceph/daemon-base:latest-main-devel
image: quay.io/ceph/daemon-base:latest-reef-devel
pull: false
firewalld: false

@@ -93,7 +93,7 @@
name: mon.foo
style: legacy
environment:
CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-main-devel
CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-reef-devel
'''

RETURN = '''# '''
4 changes: 2 additions & 2 deletions library/cephadm_bootstrap.py
@@ -124,7 +124,7 @@
cephadm_bootstrap:
mon_ip: 192.168.42.1
fsid: 3c9ba63a-c7df-4476-a1e7-317dfc711f82
image: quay.io/ceph/daemon-base:latest-main-devel
image: quay.io/ceph/daemon-base:latest-reef-devel
dashboard: false
monitoring: false
firewalld: false
@@ -133,7 +133,7 @@
cephadm_bootstrap:
mon_ip: 192.168.42.1
environment:
CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-main-devel
CEPHADM_IMAGE: quay.io/ceph/daemon-base:latest-reef-devel
'''

RETURN = '''# '''
6 changes: 3 additions & 3 deletions roles/ceph-config/templates/ceph.conf.j2
@@ -13,11 +13,11 @@ auth client required = none

fsid = {{ fsid }}
mon host = {% if nb_mon > 0 %}
{% for host in _monitor_addresses -%}
{% for name, addr in _monitor_addresses.items() -%}
{% if mon_host_v1.enabled | bool %}
{% set _v1 = ',v1:' + host.addr + mon_host_v1.suffix %}
{% set _v1 = ',v1:' + addr + mon_host_v1.suffix %}
{% endif %}
[{{ "v2:" + host.addr + mon_host_v2.suffix }}{{ _v1 | default('') }}]
[{{ "v2:" + addr + mon_host_v2.suffix }}{{ _v1 | default('') }}]
{%- if not loop.last -%},{%- endif %}
{%- endfor %}
{% elif nb_mon == 0 %}
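
The template now iterates over _monitor_addresses with .items(), i.e. the fact is a mapping of inventory hostname to monitor address rather than the previous list of {name, addr} entries (the same mapping is indexed by inventory_hostname in cephadm.yml and rolling_update.yml above). An illustrative shape, with hypothetical hostnames and addresses:

    _monitor_addresses:
      mon0: 192.168.3.10
      mon1: 192.168.3.11
      mon2: 192.168.3.12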