diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index 430318d0ed..2723cd5131 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -148,6 +148,8 @@ dummy:
 #  - "{{ crush_rule_hdd }}"
 #  - "{{ crush_rule_ssd }}"
 
+#ceph_ec_profiles: {}
+
 # Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
 # and will move hosts into them which might lead to significant data movement in the cluster!
 #
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 99019ee50e..629400f57c 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -140,6 +140,8 @@ crush_rules:
   - "{{ crush_rule_hdd }}"
   - "{{ crush_rule_ssd }}"
 
+ceph_ec_profiles: {}
+
 # Caution: this will create crush roots and racks according to hostvars {{ osd_crush_location }}
 # and will move hosts into them which might lead to significant data movement in the cluster!
 #
diff --git a/roles/ceph-osd/tasks/crush_rules.yml b/roles/ceph-osd/tasks/crush_rules.yml
index 303326794e..0d4164b557 100644
--- a/roles/ceph-osd/tasks/crush_rules.yml
+++ b/roles/ceph-osd/tasks/crush_rules.yml
@@ -10,13 +10,39 @@
     - hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(create_crush_tree) | bool
     - osd_crush_location is defined
 
+- name: Create configured ec profiles
+  ceph_ec_profile:
+    name: "{{ item.key }}"
+    cluster: "{{ cluster }}"
+    state: "{{ item.value.state | default('present') }}"
+    stripe_unit: "{{ item.value.stripe_unit | default(omit) }}"
+    plugin: "{{ item.value.plugin | default(omit) }}"
+    k: "{{ item.value.k }}"
+    m: "{{ item.value.m }}"
+    d: "{{ item.value.d | default(omit) }}"
+    l: "{{ item.value.l | default(omit) }}"
+    c: "{{ item.value.c | default(omit) }}"
+    scalar_mds: "{{ item.value.scalar_mds | default(omit) }}"
+    technique: "{{ item.value.technique | default(omit) }}"
+    crush_root: "{{ item.value.crush_root | default(omit) }}"
+    crush_failure_domain: "{{ item.value.crush_failure_domain | default(omit) }}"
+    crush_device_class: "{{ item.value.crush_device_class | default(omit) }}"
+    force: "{{ item.value.force | default(false) }}"
+  environment:
+    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
+  loop: "{{ ceph_ec_profiles | dict2items }}"
+  delegate_to: '{{ groups[mon_group_name][0] }}'
+  run_once: true
+
 - name: Create configured crush rules
   ceph_crush_rule:
     name: "{{ item.name }}"
     cluster: "{{ cluster }}"
-    rule_type: replicated
-    bucket_root: "{{ item.root }}"
-    bucket_type: "{{ item.type }}"
+    rule_type: "{{ item.rule_type | default('replicated') }}"
+    profile: "{{ item.ec_profile | default(omit) }}"
+    bucket_root: "{{ item.root | default(omit) }}"
+    bucket_type: "{{ item.type | default(omit) }}"
     device_class: "{{ item.class | default(omit) }}"
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
diff --git a/roles/ceph-rgw/tasks/rgw_create_pools.yml b/roles/ceph-rgw/tasks/rgw_create_pools.yml
index 3f6d1c2dd5..b529a0ad95 100644
--- a/roles/ceph-rgw/tasks/rgw_create_pools.yml
+++ b/roles/ceph-rgw/tasks/rgw_create_pools.yml
@@ -3,14 +3,14 @@
   ceph_ec_profile:
     name: "{{ item.value.ec_profile }}"
     cluster: "{{ cluster }}"
-    k: "{{ item.value.ec_k }}"
-    m: "{{ item.value.ec_m }}"
+    k: "{{ item.value.ec_k | default(omit) }}"
+    m: "{{ item.value.ec_m | default(omit) }}"
     crush_device_class: "{{ item.value.ec_crush_device_class | default(omit) }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   loop: "{{ rgw_create_pools | dict2items }}"
   when:
-    - item.value.type is defined
-    - item.value.type == 'ec'
+    - item.value.create_profile | default(true)
+    - item.value.type | default('') == 'ec'
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
@@ -27,32 +27,10 @@
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
-    - item.value.type is defined
-    - item.value.type == 'ec'
+    - item.value.create_profile | default(true)
+    - item.value.type | default('') == 'ec'
 
-- name: Create ec pools for rgw
-  ceph_pool:
-    name: "{{ item.key }}"
-    state: present
-    cluster: "{{ cluster }}"
-    pg_num: "{{ item.value.pg_num | default(omit) }}"
-    pgp_num: "{{ item.value.pgp_num | default(omit) }}"
-    size: "{{ item.value.size | default(omit) }}"
-    pg_autoscale_mode: "{{ item.value.pg_autoscale_mode | default(omit) }}"
-    target_size_ratio: "{{ item.value.target_size_ratio | default(omit) }}"
-    pool_type: erasure
-    erasure_profile: "{{ item.value.ec_profile }}"
-    application: rgw
-  loop: "{{ rgw_create_pools | dict2items }}"
-  delegate_to: "{{ groups[mon_group_name][0] }}"
-  when:
-    - item.value.type is defined
-    - item.value.type == 'ec'
-  environment:
-    CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-
-- name: Create replicated pools for rgw
+- name: Create rgw pools
   ceph_pool:
     name: "{{ item.key }}"
     state: present
@@ -63,12 +41,12 @@
     min_size: "{{ item.value.min_size | default(omit) }}"
     pg_autoscale_mode: "{{ item.value.pg_autoscale_mode | default(omit) }}"
     target_size_ratio: "{{ item.value.target_size_ratio | default(omit) }}"
-    pool_type: replicated
-    rule_name: "{{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
+    pool_type: "{{ 'erasure' if item.value.type | default('') == 'ec' else 'replicated' }}"
+    erasure_profile: "{{ item.value.ec_profile | default(omit) }}"
+    rule_name: "{{ item.value.rule_name if item.value.rule_name is defined else item.key if item.value.type | default('') == 'ec' else ceph_osd_pool_default_crush_rule_name }}"
     application: rgw
   loop: "{{ rgw_create_pools | dict2items }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
-  when: item.value.type is not defined or item.value.type == 'replicated'
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
diff --git a/roles/ceph-validate/tasks/check_rgw_pools.yml b/roles/ceph-validate/tasks/check_rgw_pools.yml
index ae30eec6d1..73fbeb5c6d 100644
--- a/roles/ceph-validate/tasks/check_rgw_pools.yml
+++ b/roles/ceph-validate/tasks/check_rgw_pools.yml
@@ -15,6 +15,7 @@
   when:
     - item.value.type is defined
     - item.value.type == 'ec'
+    - item.value.create_profile | default(true)
    - item.value.ec_k is undefined
 
 - name: Fail if ec_m is not set for ec pools
@@ -24,4 +25,5 @@
   when:
     - item.value.type is defined
     - item.value.type == 'ec'
+    - item.value.create_profile | default(true)
    - item.value.ec_m is undefined
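
For reference, the new variables introduced by this patch could be consumed from group_vars roughly as below. Only the variable and key names (ceph_ec_profiles, crush_rules, rule_type, ec_profile, rgw_create_pools, create_profile) come from the tasks above; the profile name, k/m values, device class, and pool name are illustrative assumptions, not part of the change.

# group_vars/osds.yml (hypothetical values)
ceph_ec_profiles:
  ec42profile:                  # dict key becomes the profile name
    k: 4                        # data chunks, required by the new task
    m: 2                        # coding chunks, required by the new task
    crush_device_class: hdd     # optional, omitted when unset

crush_rules:
  - name: ec42rule
    rule_type: erasure          # defaults to 'replicated' when unset
    ec_profile: ec42profile     # passed to ceph_crush_rule as 'profile'

# group_vars/rgws.yml (hypothetical values)
rgw_create_pools:
  default.rgw.buckets.data:
    type: ec
    ec_profile: ec42profile
    create_profile: false       # reuse the profile created by the ceph-osd role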