Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ceph-config: introduce dedicated cluster config flow #7475

Merged
merged 4 commits into from
Feb 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions group_vars/all.yml.sample
Original file line number Diff line number Diff line change
Expand Up @@ -258,6 +258,16 @@ dummy:

#cephx: true

# Cluster configuration
#ceph_cluster_conf:
# global:
# public_network: "{{ public_network | default(omit) }}"
# cluster_network: "{{ cluster_network | default(omit) }}"
# osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}"
# ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}"
# ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}"
# osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}"
Comment on lines +262 to +269
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
#ceph_cluster_conf:
# global:
# public_network: "{{ public_network | default(omit) }}"
# cluster_network: "{{ cluster_network | default(omit) }}"
# osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}"
# ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}"
# ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}"
# osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}"
# ceph_cluster_conf:
# global:
# public_network: "{{ public_network | default(omit) }}"
# cluster_network: "{{ cluster_network | default(omit) }}"
# osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}"
# ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}"
# ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}"
# osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}"

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This file is generated by a script — would changing it by hand even work? I guess CI would complain about the drift?

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We have to modify the script then 🙂

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think the no-space comment style (e.g. `#var:`) for commented-out variables was deliberate, so they could be distinguished from real explanatory comments (`# text`). What do you think?


## Client options
#
#rbd_cache: "true"
Expand Down
23 changes: 23 additions & 0 deletions plugins/filter/dict2dict.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type


class FilterModule(object):
    ''' Loop over nested dictionaries '''

    def dict2dict(self, nested_dict):
        """Flatten a two-level mapping into (outer, inner) item pairs.

        Each element of the returned list is a 2-tuple:
        ({'key': outer_key, 'value': inner_mapping},
         {'key': inner_key, 'value': inner_value})
        so a Jinja loop can address both levels as item.0 / item.1
        (e.g. `who: item.0.key`, `option: item.1.key`).
        """
        return [
            (
                {'key': outer_key, 'value': inner_mapping},
                {'key': inner_key, 'value': inner_value},
            )
            for outer_key, inner_mapping in nested_dict.items()
            for inner_key, inner_value in inner_mapping.items()
        ]

    def filters(self):
        """Expose the filters provided by this plugin to Ansible."""
        return {'dict2dict': self.dict2dict}
62 changes: 54 additions & 8 deletions roles/ceph-config/tasks/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -97,21 +97,66 @@
- name: Set osd related config facts
when: inventory_hostname in groups.get(osd_group_name, [])
block:
- name: Set_fact _osd_memory_target, override from ceph_conf_overrides
ansible.builtin.set_fact:
_osd_memory_target: "{{ item }}"
loop:
- "{{ ceph_conf_overrides.get('osd', {}).get('osd memory target', '') }}"
- "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}"
when: item

- name: Set_fact _osd_memory_target
ansible.builtin.set_fact:
_osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
when:
- _osd_memory_target is undefined
- num_osds | default(0) | int > 0
- ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > (osd_memory_target | float)
- ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') == ''

- name: Set osd_memory_target to cluster host config
ceph_config:
action: set
who: "osd.*/{{ ansible_facts['hostname'] }}:host"
option: "osd_memory_target"
value: "{{ _osd_memory_target }}"
when:
- _osd_memory_target is defined
- running_mon is defined
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ running_mon }}"

- name: Set rgw configs
when: inventory_hostname in groups.get(rgw_group_name, [])
block:
- name: Render rgw configs
vars:
_rgw_binding_socket: "{{ item.radosgw_address | default(_radosgw_address) | string + ':' + item.radosgw_frontend_port | default(radosgw_frontend_port) | string }}"
_rgw_beast_endpoint: "{{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}"
_rgw_beast_ssl_option: "{{ ' ssl_certificate=' + radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}"
ansible.builtin.set_fact:
_ceph_ansible_rgw_conf: >-
{{ _ceph_ansible_rgw_conf | default({}) | combine({
'client.rgw.' + ansible_facts['hostname'] + '.' + item.instance_name: {
'log_file': '/var/log/ceph/' + cluster + '-rgw-' + ansible_facts['hostname'] + '.' + item.instance_name + '.log',
'rgw_frontends': 'beast ' + _rgw_beast_endpoint + _rgw_beast_ssl_option,
}
}, recursive=true) }}
loop: "{{ rgw_instances }}"

- name: Set config to cluster
ceph_config:
action: set
who: "{{ item.0.key }}"
option: "{{ item.1.key }}"
value: "{{ item.1.value }}"
loop: "{{ _ceph_ansible_rgw_conf | dict2dict }}"
when:
- rgw_conf_to_cluster | default(true) | bool
- running_mon is defined
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ running_mon }}"

- name: Set rgw configs to file
ansible.builtin.set_fact:
ceph_conf_overrides: "{{ ceph_conf_overrides | default({}) | combine(_ceph_ansible_rgw_conf, recursive=true) }}"
when: not rgw_conf_to_cluster | default(true) | bool

- name: Create ceph conf directory
ansible.builtin.file:
Expand All @@ -135,6 +180,7 @@
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "0644"
config_overrides: "{{ ceph_conf_overrides }}"
config_type: ini
notify:
- Restart ceph mons
Expand Down
10 changes: 5 additions & 5 deletions roles/ceph-config/templates/ceph.conf.j2
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@
# {{ ansible_managed }}

[global]
#{% if not cephx | bool %}
#auth cluster required = none
#auth service required = none
#auth client required = none
#{% endif %}
{% if not cephx | bool %}
auth cluster required = none
auth service required = none
auth client required = none
{% endif %}
guits marked this conversation as resolved.
Show resolved Hide resolved
{# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}

{% set nb_mon = groups.get(mon_group_name, []) | length | int %}
Expand Down
10 changes: 10 additions & 0 deletions roles/ceph-defaults/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -250,6 +250,16 @@ ceph_keyring_permissions: '0600'

cephx: true

# Cluster configuration
ceph_cluster_conf:
global:
public_network: "{{ public_network | default(omit) }}"
cluster_network: "{{ cluster_network | default(omit) }}"
osd_pool_default_crush_rule: "{{ osd_pool_default_crush_rule }}"
ms_bind_ipv6: "{{ (ip_version == 'ipv6') | string }}"
ms_bind_ipv4: "{{ (ip_version == 'ipv4') | string }}"
osd_crush_chooseleaf_type: "{{ '0' if common_single_host_mode | default(false) else omit }}"

## Client options
#
rbd_cache: "true"
Expand Down
11 changes: 10 additions & 1 deletion roles/ceph-handler/handlers/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -78,4 +78,13 @@
- "Restart ceph rbdmirrors"
- "Restart ceph mgrs"
register: tmpdirpath
when: tmpdirpath.path is defined
when:
- tmpdirpath.path is defined
- not _crash_handler_called | default(false) | bool
- not _mds_handler_called | default(false) | bool
- not _mgr_handler_called | default(false) | bool
- not _mon_handler_called | default(false) | bool
- not _nfs_handler_called | default(false) | bool
- not _osd_handler_called | default(false) | bool
- not _rbdmirror_handler_called | default(false) | bool
- not _rgw_handler_called | default(false) | bool
11 changes: 10 additions & 1 deletion roles/ceph-handler/templates/restart_rgw_daemon.sh.j2
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,16 @@ check_rest() {

for ((i=0; i<${RGW_NUMS}; i++)); do
# First, restart the daemon
systemctl restart ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}

# Check if systemd unit exists
# This is needed for new instances as the restart might trigger before the deployment
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
# This is needed for new instances are the restart might trigger before the deployment
# This is needed for new instances as the restart might trigger before the deployment

if systemctl list-units --full --all | grep -q "ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}"; then
systemctl restart ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]}
else
echo "Systemd unit ceph-radosgw@rgw.${HOST_NAME}.${INSTANCES_NAME[i]} does not exist."
continue
fi

# Check socket files
check_socket ${i}
# Check rest
Expand Down
32 changes: 0 additions & 32 deletions roles/ceph-rgw/tasks/pre_requisite.yml
Original file line number Diff line number Diff line change
@@ -1,36 +1,4 @@
---
- name: Set_fact _rgw_hostname
ansible.builtin.set_fact:
_rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}"

- name: Set rgw parameter (log file)
ceph_config:
action: set
who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
option: "log file"
value: "/var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + item.instance_name }}.log"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"

- name: Set rgw parameter (rgw_frontends)
ceph_config:
action: set
who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
option: "rgw_frontends"
value: "beast port={{ item.radosgw_frontend_port | string }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
notify: Restart ceph rgws

# rgw_frontends
# {{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}

- name: Create rados gateway directories
ansible.builtin.file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
Expand Down
51 changes: 9 additions & 42 deletions site-container.yml.sample
Original file line number Diff line number Diff line change
Expand Up @@ -174,58 +174,25 @@
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

- hosts: mons[0]
become: True
become: true
gather_facts: false
any_errors_fatal: true
tasks:
- import_role:
- name: Import default role
ansible.builtin.import_role:
name: ceph-defaults

- name: set global config
ceph_config:
action: set
who: "global"
option: "{{ item.key }}"
value: "{{ item.value }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_dict:
"{{ {
'public_network': public_network | default(False),
'cluster_network': cluster_network | default(False),
'osd pool default crush rule': osd_pool_default_crush_rule,
'ms bind ipv6': 'true' if ip_version == 'ipv6' else 'false',
'ms bind ipv4': 'false' if ip_version == 'ipv6' else 'true',
'osd crush chooseleaf type': '0' if common_single_host_mode | default(False) | bool else False,
} }}"
when:
- inventory_hostname == ansible_play_hosts_all | last
- item.value

- name: set global config overrides
ceph_config:
action: set
who: "global"
option: "{{ item.key }}"
value: "{{ item.value }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
when: inventory_hostname == ansible_play_hosts_all | last
with_dict: "{{ ceph_conf_overrides['global'] }}"

- name: set osd_memory_target
- name: Set cluster configs
ceph_config:
action: set
who: "osd.*/{{ item }}:host"
option: "osd_memory_target"
value: "{{ _osd_memory_target | default(osd_memory_target) }}"
who: "{{ item.0.key }}"
option: "{{ item.1.key }}"
value: "{{ item.1.value }}"
when: item.1.value != omit
loop: "{{ ceph_cluster_conf | dict2dict }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
when: inventory_hostname == ansible_play_hosts_all | last
loop: "{{ groups[osd_group_name] | default([]) }}"

- hosts: osds
become: True
Expand Down
45 changes: 9 additions & 36 deletions site.yml.sample
Original file line number Diff line number Diff line change
Expand Up @@ -166,49 +166,22 @@
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

- hosts: mons[0]
become: True
become: true
gather_facts: false
any_errors_fatal: true
tasks:
- import_role:
- name: Import default role
ansible.builtin.import_role:
name: ceph-defaults

- name: set global config
ceph_config:
action: set
who: "global"
option: "{{ item.key }}"
value: "{{ item.value }}"
with_dict:
"{{ {
'public_network': public_network | default(False),
'cluster_network': cluster_network | default(False),
'osd pool default crush rule': osd_pool_default_crush_rule,
'ms bind ipv6': 'true' if ip_version == 'ipv6' else 'false',
'ms bind ipv4': 'false' if ip_version == 'ipv6' else 'true',
'osd crush chooseleaf type': '0' if common_single_host_mode | default(False) | bool else False,
} }}"
when:
- inventory_hostname == ansible_play_hosts_all | last
- item.value

- name: set global config overrides
ceph_config:
action: set
who: "global"
option: "{{ item.key }}"
value: "{{ item.value }}"
when: inventory_hostname == ansible_play_hosts_all | last
with_dict: "{{ ceph_conf_overrides['global'] }}"

- name: set osd_memory_target
- name: Set cluster configs
ceph_config:
action: set
who: "osd.*/{{ item }}:host"
option: "osd_memory_target"
value: "{{ _osd_memory_target | default(osd_memory_target) }}"
when: inventory_hostname == ansible_play_hosts_all | last
loop: "{{ groups[osd_group_name] | default([]) }}"
who: "{{ item.0.key }}"
option: "{{ item.1.key }}"
value: "{{ item.1.value }}"
when: item.1.value != omit
loop: "{{ ceph_cluster_conf | dict2dict }}"

- hosts: osds
gather_facts: false
Expand Down
Loading