Skip to content

Commit

Permalink
ceph-osd: introduce ec profile creation
Browse files Browse the repository at this point in the history
RGW pools can now use the existing EC profiles and CRUSH rules created by the ceph-osd role.

Signed-off-by: Seena Fallah <[email protected]>
  • Loading branch information
clwluvw authored and guits committed Mar 16, 2024
1 parent 80666a2 commit bbc1ba5
Show file tree
Hide file tree
Showing 5 changed files with 45 additions and 35 deletions.
2 changes: 2 additions & 0 deletions group_vars/osds.yml.sample
Original file line number Diff line number Diff line change
Expand Up @@ -148,6 +148,8 @@ dummy:
# - "{{ crush_rule_hdd }}"
# - "{{ crush_rule_ssd }}"

#ceph_ec_profiles: {}

# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
# and will move hosts into them which might lead to significant data movement in the cluster!
#
Expand Down
2 changes: 2 additions & 0 deletions roles/ceph-osd/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,8 @@ crush_rules:
- "{{ crush_rule_hdd }}"
- "{{ crush_rule_ssd }}"

ceph_ec_profiles: {}

# Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
# and will move hosts into them which might lead to significant data movement in the cluster!
#
Expand Down
32 changes: 29 additions & 3 deletions roles/ceph-osd/tasks/crush_rules.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,39 @@
- hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(create_crush_tree) | bool
- osd_crush_location is defined

- name: Create configured ec profiles
ceph_ec_profile:
name: "{{ item.key }}"
cluster: "{{ cluster }}"
state: "{{ item.value.state | default('present') }}"
stripe_unit: "{{ item.value.stripe_unit | default(omit) }}"
plugin: "{{ item.value.plugin | default(omit) }}"
k: "{{ item.value.k }}"
m: "{{ item.value.m }}"
d: "{{ item.value.d | default(omit) }}"
l: "{{ item.value.l | default(omit) }}"
c: "{{ item.value.c | default(omit) }}"
scalar_mds: "{{ item.value.scalar_mds | default(omit) }}"
technique: "{{ item.value.technique | default(omit) }}"
crush_root: "{{ item.value.crush_root | default(omit) }}"
crush_failure_domain: "{{ item.value.crush_failure_domain | default(omit) }}"
crush_device_class: "{{ item.value.crush_device_class | default(omit) }}"
force: "{{ item.value.force | default(false) }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
loop: "{{ ceph_ec_profiles | dict2items }}"
delegate_to: '{{ groups[mon_group_name][0] }}'
run_once: true

- name: Create configured crush rules
ceph_crush_rule:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
rule_type: replicated
bucket_root: "{{ item.root }}"
bucket_type: "{{ item.type }}"
rule_type: "{{ item.rule_type | default('replicated') }}"
profile: "{{ item.ec_profile | default(omit) }}"
bucket_root: "{{ item.root | default(omit) }}"
bucket_type: "{{ item.type | default(omit) }}"
device_class: "{{ item.class | default(omit) }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
Expand Down
42 changes: 10 additions & 32 deletions roles/ceph-rgw/tasks/rgw_create_pools.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,14 @@
ceph_ec_profile:
name: "{{ item.value.ec_profile }}"
cluster: "{{ cluster }}"
k: "{{ item.value.ec_k }}"
m: "{{ item.value.ec_m }}"
k: "{{ item.value.ec_k | default(omit) }}"
m: "{{ item.value.ec_m | default(omit) }}"
crush_device_class: "{{ item.value.ec_crush_device_class | default(omit) }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
loop: "{{ rgw_create_pools | dict2items }}"
when:
- item.value.type is defined
- item.value.type == 'ec'
- item.value.create_profile | default(true)
- item.value.type | default('') == 'ec'
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
Expand All @@ -27,32 +27,10 @@
loop: "{{ rgw_create_pools | dict2items }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- item.value.type is defined
- item.value.type == 'ec'
- item.value.create_profile | default(true)
- item.value.type | default('') == 'ec'

- name: Create ec pools for rgw
ceph_pool:
name: "{{ item.key }}"
state: present
cluster: "{{ cluster }}"
pg_num: "{{ item.value.pg_num | default(omit) }}"
pgp_num: "{{ item.value.pgp_num | default(omit) }}"
size: "{{ item.value.size | default(omit) }}"
pg_autoscale_mode: "{{ item.value.pg_autoscale_mode | default(omit) }}"
target_size_ratio: "{{ item.value.target_size_ratio | default(omit) }}"
pool_type: erasure
erasure_profile: "{{ item.value.ec_profile }}"
application: rgw
loop: "{{ rgw_create_pools | dict2items }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- item.value.type is defined
- item.value.type == 'ec'
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"

- name: Create replicated pools for rgw
- name: Create rgw pools
ceph_pool:
name: "{{ item.key }}"
state: present
Expand All @@ -63,12 +41,12 @@
min_size: "{{ item.value.min_size | default(omit) }}"
pg_autoscale_mode: "{{ item.value.pg_autoscale_mode | default(omit) }}"
target_size_ratio: "{{ item.value.target_size_ratio | default(omit) }}"
pool_type: replicated
rule_name: "{{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
pool_type: "{{ 'erasure' if item.value.type | default('') == 'ec' else 'replicated' }}"
erasure_profile: "{{ item.value.ec_profile | default(omit) }}"
rule_name: "{{ item.value.rule_name if item.value.rule_name is defined else item.key if item.value.type | default('') == 'ec' else ceph_osd_pool_default_crush_rule_name }}"
application: rgw
loop: "{{ rgw_create_pools | dict2items }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: item.value.type is not defined or item.value.type == 'replicated'
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
2 changes: 2 additions & 0 deletions roles/ceph-validate/tasks/check_rgw_pools.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
when:
- item.value.type is defined
- item.value.type == 'ec'
- item.value.create_profile | default(true)
- item.value.ec_k is undefined

- name: Fail if ec_m is not set for ec pools
Expand All @@ -24,4 +25,5 @@
when:
- item.value.type is defined
- item.value.type == 'ec'
- item.value.create_profile | default(true)
- item.value.ec_m is undefined

0 comments on commit bbc1ba5

Please sign in to comment.