From ef80748a00e7d4c90a399df64dadf239dbc61531 Mon Sep 17 00:00:00 2001
From: Mike Krasnianskyi
Date: Thu, 12 Mar 2020 14:07:19 +0000
Subject: [PATCH 1/6] working on cassandra restore

---
 _dependencies/tasks/main.yml | 20 ++++-----
 cluster_hosts/tasks/main.yml | 12 +++++-
 create/tasks/aws.yml         | 82 +++++++++++++++++++++++++++++++++++-
 create/tasks/aws_disks.yml   | 13 ++++++
 4 files changed, 113 insertions(+), 14 deletions(-)
 create mode 100644 create/tasks/aws_disks.yml

diff --git a/_dependencies/tasks/main.yml b/_dependencies/tasks/main.yml
index a5c210a6..51aab58d 100644
--- a/_dependencies/tasks/main.yml
+++ b/_dependencies/tasks/main.yml
@@ -4,14 +4,14 @@
   include_vars: { dir: "{{ playbook_dir }}/group_vars/{{ clusterid }}" }
   tags: clusterverse_clean,clusterverse_create,clusterverse_dynamic_inventory,clusterverse_config,clusterverse_readiness

-- name: Preflight check
-  block:
-    - assert: { that: "ansible_version.full is version_compare('2.9', '>=')", msg: "Ansible >=2.9 required." }
-    - assert: { that: "app_name is defined and app_name != ''", msg: "Please define app_name" }
-    - assert: { that: "app_class is defined and app_class != ''", msg: "Please define app_class" }
-    - assert: { that: "clusterid is defined and cluster_vars is defined", msg: "Please define clusterid" }
-    - assert: { that: "buildenv is defined and cluster_vars[buildenv] is defined", msg: "Please define buildenv" }
+# - name: Preflight check
+#   block:
+#     - assert: { that: "ansible_version.full is version_compare('2.9', '>=')", msg: "Ansible >=2.9 required." }
+#     - assert: { that: "app_name is defined and app_name != ''", msg: "Please define app_name" }
+#     - assert: { that: "app_class is defined and app_class != ''", msg: "Please define app_class" }
+#     - assert: { that: "clusterid is defined and cluster_vars is defined", msg: "Please define clusterid" }
+#     - assert: { that: "buildenv is defined and cluster_vars[buildenv] is defined", msg: "Please define buildenv" }

-    - assert: { that: "(cluster_vars.assign_public_ip == 'yes' and cluster_vars.inventory_ip == 'public') or (cluster_vars.inventory_ip == 'private')", msg: "If inventory_ip=='public', 'assign_public_ip' must be 'yes'" }
-  when: cluster_vars.type == "gce" or cluster_vars.type == "aws"
-  tags: clusterverse_clean,clusterverse_create,clusterverse_dynamic_inventory,clusterverse_config,clusterverse_readiness
+#     - assert: { that: "(cluster_vars.assign_public_ip == 'yes' and cluster_vars.inventory_ip == 'public') or (cluster_vars.inventory_ip == 'private')", msg: "If inventory_ip=='public', 'assign_public_ip' must be 'yes'" }
+#   when: cluster_vars.type == "gce" or cluster_vars.type == "aws"
+#   tags: clusterverse_clean,clusterverse_create,clusterverse_dynamic_inventory,clusterverse_config,clusterverse_readiness
diff --git a/cluster_hosts/tasks/main.yml b/cluster_hosts/tasks/main.yml
index 52582ce1..a87c5dbc 100644
--- a/cluster_hosts/tasks/main.yml
+++ b/cluster_hosts/tasks/main.yml
@@ -4,25 +4,30 @@
   set_fact:
     epoch_time: "{{ansible_date_time.epoch}}"

-# Create an array of dictionaries containing all the hostnames PER-AZ (i.e. couchbase-dev-node-a0, couchbase-dev-master-a1, couchbase-dev-master-b0, couchbase-dev-master-b1 etc) to be created:
 - set_fact:
     cluster_hosts_flat: |
       {% set res = [] -%}
       {%- for hostttype in cluster_vars[buildenv].hosttype_vars.keys() -%}
+        {%- set _count = [] %}
         {%- for azname in cluster_vars[buildenv].hosttype_vars[hostttype].vms_by_az.keys() -%}
           {%- for azcount in range(0,cluster_vars[buildenv].hosttype_vars[hostttype].vms_by_az[azname]|int) -%}
+            {%- set _c_dummy = _count.append(1) -%}
             {% set _dummy = res.extend([{
               'hosttype': hostttype,
               'hostname': cluster_name + '-' + hostttype + '-' + azname + azcount|string,
               'az_name': azname|string,
               'flavor': cluster_vars[buildenv].hosttype_vars[hostttype].flavor,
-              'release': release_version
+              'release': release_version,
+              'auto_volumes': cluster_vars[buildenv].hosttype_vars[hostttype].auto_volumes,
+              'index_per_hosttype': (_count|length - 1)
               }]) -%}
           {%- endfor %}
         {%- endfor %}
       {%- endfor %}
       {{ res }}
+
+

 - include_tasks: aws.yml
   when: cluster_vars.type == "aws"

@@ -30,3 +35,6 @@
   when: cluster_vars.type == "gce"

 #- debug: msg={{cluster_hosts_flat}}
+
+
+# Correcting @eyettea 's clean solution: Another solution that's a bit cleaner imo is to initialize an empty list {% set count = [ ] %}, add an item to the list in every loop {% set __ = count.append(1) %} and use the length to display the count count|length
\ No newline at end of file
diff --git a/create/tasks/aws.yml b/create/tasks/aws.yml
index af67d840..2a015973 100644
--- a/create/tasks/aws.yml
+++ b/create/tasks/aws.yml
@@ -1,4 +1,40 @@
 ---
+- name: Get snapshots info
+  ec2_snapshot_info:
+    aws_access_key: "{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
+    aws_secret_key: "{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
+    region: "{{ cluster_vars.region }}"
+    filters: "{{ _snapshot_tags[0] }}"
+  register: r_ebs_snapshots
+  when:
+    - _snapshot_tags|length > 0
+  vars:
+    _snapshot_tags: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags') }}"
+
+- name: Assert that snapshots exist
+  assert:
+    that:
+      - item in _available_snapshots
+    quiet: true
+    fail_msg: "{{ item }} not in available snapshots {{ _available_snapshots }}"
+  loop: "{{ _configuration_snapshots }}"
+  vars:
+    _available_snapshots: "{{ r_ebs_snapshots.snapshots|json_query('[].tags.backup_id')|unique }}"
+    _configuration_snapshots: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags.*[]')|unique }}"
+    _snapshot_tags: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags') }}"
+  when:
+    - _snapshot_tags|length > 0
+
+- include_tasks: aws_disks.yml
+  loop_control:
+    loop_var: loop_instance
+  loop: "{{ cluster_hosts_flat|sort(attribute='hostname') }}"
+  # when:
+  #   - _available_snapshots|length > 0
+  vars:
+    _available_snapshots: "{{ r_ebs_snapshots.snapshots|json_query('[].tags.backup_id')|unique }}"
+
+- meta: end_play

 - name: Create AWS security group
   ec2_group:
@@ -43,15 +79,57 @@
           owner: "{{ lookup('env','USER')| lower }}"
           maintenance_mode: "{%- if prometheus_set_unset_maintenance_mode|bool -%}true{%- else -%}false{%- endif -%}"
           termination_protection: "{{cluster_vars[buildenv].termination_protection}}"
-      volumes: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].auto_volumes | default([])}}"
+      # volumes: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].auto_volumes | default([])}}"
       count_tag:
         Name: "{{item.hostname}}"
       exact_count: 1
-  with_items: "{{cluster_hosts_flat}}"
+  # with_items: "{{cluster_hosts_flat}}"
   async: 7200
   poll: 0
   register: aws_instances
+
+
+
+
+# - block:
+
+#   - name: create | Create volumes
+#     ec2_vol:
+#       aws_access_key: "{{ provision_aws_access_key }}"
+#       aws_secret_key: "{{ provision_aws_secret_key }}"
+#       security_token: "{{ provision_security_token }}"
+#       profile: "{{ provision_profile }}"
+#       region: "{{ provision_region }}"
+#       # this option managed on ansible level on not on AWS, therefore removed
+#       # to identify this volume as created by this role - new tag will be added
+#       # delete_on_termination: "{{ item.value['delete_on_termination'] }}"
+#       iops: "{% if item.value['volume_type'] == 'io1' %}{{ item.value['volume_iops'] | default(omit,true) }}{% endif %}"
+#       volume_size: "{{ item.value['volume_size'] }}"
+#       name: "{{ provision_ec2_instance_name }}-{{ item.key }}"
+#       volume_type: "{{ item.value['volume_type'] }}"
+#       encrypted: "{{ item.value['encrypted'] }}"
+#       zone: "{{ _ec2_vpc_subnet_facts.subnets[0].availability_zone }}"
+#       tags: |-
+#         {{ item.value['tags'] | combine({
+#           'Hostname' : provision_ec2_instance_name,
+#           'Mountpoint' : item.value['mountpoint'],
+#           'Type' : provision_node_type,
+#           'Device' : item.key,
+#           'Fstype' : item.value['fstype'],
+#           'managed_by' : 'ansible'
+#         }) }}
+#     loop: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].auto_volumes | default([])}}"
+#     async: 7200
+#     poll: 0
+#     changed_when: false
+#     register: _volumes_data
+
+
+
+
+
+

 - name: Wait for aws instance creation to complete
   async_status:
     jid: "{{ item.ansible_job_id }}"
diff --git a/create/tasks/aws_disks.yml b/create/tasks/aws_disks.yml
new file mode 100644
index 00000000..1ab52c57
--- /dev/null
+++ b/create/tasks/aws_disks.yml
@@ -0,0 +1,13 @@
+- debug:
+    msg:
+      - "{{ loop_instance }}"
+      - "{{ loop_instance.hostname }}"
+      - "{{ loop_instance.index_per_hosttype }}"
+      - "{{ item }}"
+      - "{{ (r_ebs_snapshots.snapshots|json_query(_query))[0].snapshot_id }}"
+      - "{{ cluster_hosts_flat }}"
+  loop: "{{ loop_instance.auto_volumes }}"
+  when:
+    - item.snapshot_tags is defined
+  vars:
+    _query: "[?tags.backup_id == '{{ item.snapshot_tags['tag:backup_id']}}' && tags.node == '{{ loop_instance.index_per_hosttype }}']"
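
The index_per_hosttype field introduced in PATCH 1 relies on the Jinja2 list-append trick that the trailing comment in cluster_hosts/tasks/main.yml describes: loop-local variables cannot be incremented across iterations, so an empty list is appended to on every pass and its length serves as the counter. A minimal standalone sketch of the pattern (the hosttype and AZ values below are invented, not taken from the patches):

    - set_fact:
        indexed_hosts: |
          {% set res = [] -%}
          {%- for hosttype in ['master', 'node'] -%}
            {%- set _count = [] -%}
            {%- for az in ['a', 'b'] -%}
              {%- set _ = _count.append(1) -%}
              {%- set _ = res.append(hosttype ~ '-' ~ az ~ ':' ~ (_count|length - 1)) -%}
            {%- endfor -%}
          {%- endfor -%}
          {{ res }}

This yields ['master-a:0', 'master-b:1', 'node-a:0', 'node-b:1'], a zero-based index that restarts for each hosttype, which is what the snapshot lookup in create/tasks/aws_disks.yml later matches against the snapshot's node tag.
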
_snapshot_tags[0] }}" + register: r_ebs_snapshots + + - name: Assert that snapshots exists + assert: + that: + - item in _available_snapshots + quiet: true + fail_msg: "{{ item }} not in available snapshots {{ _available_snapshots }}" + loop: "{{ _configuration_snapshots }}" + vars: + _available_snapshots: "{{ r_ebs_snapshots.snapshots|json_query('[].tags.backup_id')|unique }}" + _configuration_snapshots: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags.*[]')|unique }}" + + - include_tasks: aws_disks.yml + loop_control: + loop_var: loop_instance + loop: "{{ cluster_hosts_flat|sort(attribute='hostname') }}" + vars: + _available_snapshots: "{{ r_ebs_snapshots.snapshots|json_query('[].tags.backup_id')|unique }}" + + - set_fact: + cluster_hosts_flat: "{{ cluster_hosts_flat_snapshots }}" + vars: - _available_snapshots: "{{ r_ebs_snapshots.snapshots|json_query('[].tags.backup_id')|unique }}" - _configuration_snapshots: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags.*[]')|unique }}" _snapshot_tags: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags') }}" when: - _snapshot_tags|length > 0 -- include_tasks: aws_disks.yml - loop_control: - loop_var: loop_instance - loop: "{{ cluster_hosts_flat|sort(attribute='hostname') }}" - # when: - # - _available_snapshots|length > 0 - vars: - _available_snapshots: "{{ r_ebs_snapshots.snapshots|json_query('[].tags.backup_id')|unique }}" +- debug: + msg: "{{ cluster_hosts_flat }}" - meta: end_play diff --git a/create/tasks/aws_disks.yml b/create/tasks/aws_disks.yml index 1ab52c57..c0cdff6b 100644 --- a/create/tasks/aws_disks.yml +++ b/create/tasks/aws_disks.yml @@ -1,13 +1,16 @@ -- debug: - msg: - - "{{ loop_instance }}" - - "{{ loop_instance.hostname }}" - - "{{ loop_instance.index_per_hosttype }}" - - "{{ item }}" - - "{{ (r_ebs_snapshots.snapshots|json_query(_query))[0].snapshot_id }}" - - "{{ cluster_hosts_flat }}" +- set_fact: + auto_volumes_snapshots: "{{ auto_volumes_snapshots|default([]) + [_auto_instance] }}" loop: "{{ loop_instance.auto_volumes }}" - when: - - item.snapshot_tags is defined vars: - _query: "[?tags.backup_id == '{{ item.snapshot_tags['tag:backup_id']}}' && tags.node == '{{ loop_instance.index_per_hosttype }}']" + _auto_instance: "{% if 'snapshot_tags' in item.keys() %}{{ item|combine({'snapshot_id': _snapshot_id }) }}{% else %}{{ item }}{% endif %}" + _snapshot_id: "{% if 'snapshot_tags' in item.keys() %}{{ (r_ebs_snapshots.snapshots|json_query(_query))[0].snapshot_id }}{% endif %}" + _query: "{% if 'snapshot_tags' in item.keys() %}[?tags.backup_id == '{{ item.snapshot_tags['tag:backup_id']}}' && tags.node == '{{ loop_instance.index_per_hosttype }}']{% endif %}" + +- set_fact: + instance_with_auto_snapshots: "{{ instance_with_auto_snapshots|default({})|combine({'auto_volumes': auto_volumes_snapshots }) }}" + +- set_fact: + cluster_hosts_flat_snapshots: "{{ cluster_hosts_flat_snapshots|default([]) + [ _cluster_hosts_flat_snapshots_loop ]}}" + auto_volumes_snapshots: [] + vars: + _cluster_hosts_flat_snapshots_loop: "{{ loop_instance| combine(instance_with_auto_snapshots) }}" From 77ae5b650dc4c61718cdd8020c324736c3f56631 Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Date: Tue, 17 Mar 2020 17:12:23 +0000 Subject: [PATCH 3/6] Auto_volumes: snapshots tags and ids fix --- create/tasks/aws.yml | 55 +++++--------------------------------- create/tasks/aws_disks.yml | 2 +- 2 files changed, 7 insertions(+), 50 deletions(-) diff --git 
From 77ae5b650dc4c61718cdd8020c324736c3f56631 Mon Sep 17 00:00:00 2001
From: Fernando Fernandez
Date: Tue, 17 Mar 2020 17:12:23 +0000
Subject: [PATCH 3/6] Auto_volumes: snapshots tags and ids fix

---
 create/tasks/aws.yml       | 55 +++++---------------------------------
 create/tasks/aws_disks.yml |  2 +-
 2 files changed, 7 insertions(+), 50 deletions(-)

diff --git a/create/tasks/aws.yml b/create/tasks/aws.yml
index b3c5ec96..1291cfa2 100644
--- a/create/tasks/aws.yml
+++ b/create/tasks/aws.yml
@@ -30,15 +30,14 @@
       cluster_hosts_flat: "{{ cluster_hosts_flat_snapshots }}"

   vars:
-    _snapshot_tags: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags') }}"
-  when:
-    - _snapshot_tags|length > 0
+    _snapshot_tags: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags') }}"
+    _snapshot_tags_check: "{{ _snapshot_tags[0] if _snapshot_tags|length > 0 else {} }}"
+  when:
+    - _snapshot_tags_check != {}

 - debug:
     msg: "{{ cluster_hosts_flat }}"

-- meta: end_play
-
 - name: Create AWS security group
   ec2_group:
     name: "{{ cluster_name }}-sg"
@@ -82,57 +81,15 @@
           owner: "{{ lookup('env','USER')| lower }}"
           maintenance_mode: "{%- if prometheus_set_unset_maintenance_mode|bool -%}true{%- else -%}false{%- endif -%}"
           termination_protection: "{{cluster_vars[buildenv].termination_protection}}"
-      # volumes: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].auto_volumes | default([])}}"
+      volumes: "{{item.auto_volumes | default([])}}"
       count_tag:
         Name: "{{item.hostname}}"
       exact_count: 1
-  # with_items: "{{cluster_hosts_flat}}"
+  with_items: "{{cluster_hosts_flat}}"
   async: 7200
   poll: 0
   register: aws_instances
-
-
-
-
-# - block:
-
-#   - name: create | Create volumes
-#     ec2_vol:
-#       aws_access_key: "{{ provision_aws_access_key }}"
-#       aws_secret_key: "{{ provision_aws_secret_key }}"
-#       security_token: "{{ provision_security_token }}"
-#       profile: "{{ provision_profile }}"
-#       region: "{{ provision_region }}"
-#       # this option managed on ansible level on not on AWS, therefore removed
-#       # to identify this volume as created by this role - new tag will be added
-#       # delete_on_termination: "{{ item.value['delete_on_termination'] }}"
-#       iops: "{% if item.value['volume_type'] == 'io1' %}{{ item.value['volume_iops'] | default(omit,true) }}{% endif %}"
-#       volume_size: "{{ item.value['volume_size'] }}"
-#       name: "{{ provision_ec2_instance_name }}-{{ item.key }}"
-#       volume_type: "{{ item.value['volume_type'] }}"
-#       encrypted: "{{ item.value['encrypted'] }}"
-#       zone: "{{ _ec2_vpc_subnet_facts.subnets[0].availability_zone }}"
-#       tags: |-
-#         {{ item.value['tags'] | combine({
-#           'Hostname' : provision_ec2_instance_name,
-#           'Mountpoint' : item.value['mountpoint'],
-#           'Type' : provision_node_type,
-#           'Device' : item.key,
-#           'Fstype' : item.value['fstype'],
-#           'managed_by' : 'ansible'
-#         }) }}
-#     loop: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].auto_volumes | default([])}}"
-#     async: 7200
-#     poll: 0
-#     changed_when: false
-#     register: _volumes_data
-
-
-
-
-
-
 - name: Wait for aws instance creation to complete
   async_status:
     jid: "{{ item.ansible_job_id }}"
diff --git a/create/tasks/aws_disks.yml b/create/tasks/aws_disks.yml
index c0cdff6b..a28b1cb1 100644
--- a/create/tasks/aws_disks.yml
+++ b/create/tasks/aws_disks.yml
@@ -2,7 +2,7 @@
     auto_volumes_snapshots: "{{ auto_volumes_snapshots|default([]) + [_auto_instance] }}"
   loop: "{{ loop_instance.auto_volumes }}"
   vars:
-    _auto_instance: "{% if 'snapshot_tags' in item.keys() %}{{ item|combine({'snapshot_id': _snapshot_id }) }}{% else %}{{ item }}{% endif %}"
+    _auto_instance: "{% if 'snapshot_tags' in item.keys() %}{{ item|combine({'snapshot': _snapshot_id }) }}{% else %}{{ item }}{% endif %}"
     _snapshot_id: "{% if 'snapshot_tags' in item.keys() %}{{ (r_ebs_snapshots.snapshots|json_query(_query))[0].snapshot_id }}{% endif %}"
     _query: "{% if 'snapshot_tags' in item.keys() %}[?tags.backup_id == '{{ item.snapshot_tags['tag:backup_id']}}' && tags.node == '{{ loop_instance.index_per_hosttype }}']{% endif %}"
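
The small rename in aws_disks.yml above ('snapshot_id' becoming 'snapshot') matters because the volumes list of the ec2 module expects the source snapshot under the key snapshot, not snapshot_id, once the list is handed to volumes: "{{item.auto_volumes | default([])}}". After PATCH 3 a restored entry would come out roughly like the following (the snapshot ID and sizes are invented for illustration, reusing the sketch from the note after PATCH 2):

    - device_name: /dev/sdf
      volume_type: gp2
      volume_size: 100
      snapshot_tags:
        "tag:backup_id": cassandra-20200312
      snapshot: snap-0123456789abcdef0

Note that this variant still carries the snapshot_tags key along in each entry; the PATCH 5 rewrite below pops it before the list reaches the ec2 module.
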
From 6df7df69ea29eba38baa606e6a8715394346cc67 Mon Sep 17 00:00:00 2001
From: Fernando Fernandez
Date: Thu, 19 Mar 2020 13:35:13 +0000
Subject: [PATCH 4/6] Fix blocks previously left commented out

---
 _dependencies/tasks/main.yml | 20 ++++++++++----------
 cluster_hosts/tasks/main.yml |  5 -----
 2 files changed, 10 insertions(+), 15 deletions(-)

diff --git a/_dependencies/tasks/main.yml b/_dependencies/tasks/main.yml
index 51aab58d..a5c210a6 100644
--- a/_dependencies/tasks/main.yml
+++ b/_dependencies/tasks/main.yml
@@ -4,14 +4,14 @@
   include_vars: { dir: "{{ playbook_dir }}/group_vars/{{ clusterid }}" }
   tags: clusterverse_clean,clusterverse_create,clusterverse_dynamic_inventory,clusterverse_config,clusterverse_readiness

-# - name: Preflight check
-#   block:
-#     - assert: { that: "ansible_version.full is version_compare('2.9', '>=')", msg: "Ansible >=2.9 required." }
-#     - assert: { that: "app_name is defined and app_name != ''", msg: "Please define app_name" }
-#     - assert: { that: "app_class is defined and app_class != ''", msg: "Please define app_class" }
-#     - assert: { that: "clusterid is defined and cluster_vars is defined", msg: "Please define clusterid" }
-#     - assert: { that: "buildenv is defined and cluster_vars[buildenv] is defined", msg: "Please define buildenv" }
+- name: Preflight check
+  block:
+    - assert: { that: "ansible_version.full is version_compare('2.9', '>=')", msg: "Ansible >=2.9 required." }
+    - assert: { that: "app_name is defined and app_name != ''", msg: "Please define app_name" }
+    - assert: { that: "app_class is defined and app_class != ''", msg: "Please define app_class" }
+    - assert: { that: "clusterid is defined and cluster_vars is defined", msg: "Please define clusterid" }
+    - assert: { that: "buildenv is defined and cluster_vars[buildenv] is defined", msg: "Please define buildenv" }

-#     - assert: { that: "(cluster_vars.assign_public_ip == 'yes' and cluster_vars.inventory_ip == 'public') or (cluster_vars.inventory_ip == 'private')", msg: "If inventory_ip=='public', 'assign_public_ip' must be 'yes'" }
-#   when: cluster_vars.type == "gce" or cluster_vars.type == "aws"
-#   tags: clusterverse_clean,clusterverse_create,clusterverse_dynamic_inventory,clusterverse_config,clusterverse_readiness
+    - assert: { that: "(cluster_vars.assign_public_ip == 'yes' and cluster_vars.inventory_ip == 'public') or (cluster_vars.inventory_ip == 'private')", msg: "If inventory_ip=='public', 'assign_public_ip' must be 'yes'" }
+  when: cluster_vars.type == "gce" or cluster_vars.type == "aws"
+  tags: clusterverse_clean,clusterverse_create,clusterverse_dynamic_inventory,clusterverse_config,clusterverse_readiness
diff --git a/cluster_hosts/tasks/main.yml b/cluster_hosts/tasks/main.yml
index a87c5dbc..63d64698 100644
--- a/cluster_hosts/tasks/main.yml
+++ b/cluster_hosts/tasks/main.yml
@@ -33,8 +33,3 @@

 - include_tasks: gce.yml
   when: cluster_vars.type == "gce"
-
-#- debug: msg={{cluster_hosts_flat}}
-
-
-# Correcting @eyettea 's clean solution: Another solution that's a bit cleaner imo is to initialize an empty list {% set count = [ ] %}, add an item to the list in every loop {% set __ = count.append(1) %} and use the length to display the count count|length
\ No newline at end of file
From 468b25656c5a2339665ac31d02d23b08fbb44d00 Mon Sep 17 00:00:00 2001
From: Mike Krasnianskyi
Date: Wed, 25 Mar 2020 15:36:47 +0000
Subject: [PATCH 5/6] Add ability to restore from snapshots

---
 cluster_hosts/tasks/aws.yml  | 39 ++++++++++++++++++++++++++++++++++++
 cluster_hosts/tasks/main.yml |  1 +
 create/tasks/aws.yml         |  2 +-
 3 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/cluster_hosts/tasks/aws.yml b/cluster_hosts/tasks/aws.yml
index 96f665ee..8c536322 100644
--- a/cluster_hosts/tasks/aws.yml
+++ b/cluster_hosts/tasks/aws.yml
@@ -145,3 +145,42 @@
       {% endif %}
     {%- endfor %}
     {{ res }}
+
+- block:
+  - name: Get snapshots info
+    ec2_snapshot_info:
+      aws_access_key: "{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
+      aws_secret_key: "{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
+      region: "{{ cluster_vars.region }}"
+      filters: "{{ _snapshot_tags[0] }}"
+    register: r_ebs_snapshots
+
+  - name: Assert that snapshots exist
+    assert:
+      that:
+        - item in _available_snapshots
+      quiet: true
+      fail_msg: "{{ item }} not in available snapshots {{ _available_snapshots }}"
+    loop: "{{ _configuration_snapshots }}"
+    vars:
+      _available_snapshots: "{{ r_ebs_snapshots.snapshots|json_query('[].tags.backup_id')|unique }}"
+      _configuration_snapshots: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags.*[]')|unique }}"
+
+  - name: update cluster_hosts_flat with snapshot_id
+    set_fact:
+      cluster_hosts_flat: |
+        {%- for host in cluster_hosts_flat -%}
+          {%- set cluster_host_topology = host.hostname | regex_replace('^.*(-.*?-).*$', '\\1') -%}
+          {%- for vol in host.auto_volumes -%}
+            {%- set cur_snapshot = r_ebs_snapshots | default([]) | to_json | from_json | json_query('snapshots[?contains(tags.Name, \'' + cluster_host_topology + '\')]') -%}
+            {%- if cur_snapshot and 'snapshot_tags' in vol.keys() -%}
+              {%- set _dummy = vol.update({'shapshot_id': cur_snapshot[0].snapshot_id}) -%}
+              {%- set _dummy = vol.pop('snapshot_tags') -%}
+            {%- endif %}
+          {%- endfor %}
+        {%- endfor %}
+        {{ cluster_hosts_flat }}
+  vars:
+    _snapshot_tags: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags') }}"
+  when:
+    - _snapshot_tags|length > 0
\ No newline at end of file
diff --git a/cluster_hosts/tasks/main.yml b/cluster_hosts/tasks/main.yml
index 52582ce1..dab90499 100644
--- a/cluster_hosts/tasks/main.yml
+++ b/cluster_hosts/tasks/main.yml
@@ -16,6 +16,7 @@
               'hostname': cluster_name + '-' + hostttype + '-' + azname + azcount|string,
               'az_name': azname|string,
               'flavor': cluster_vars[buildenv].hosttype_vars[hostttype].flavor,
+              'auto_volumes': cluster_vars[buildenv].hosttype_vars[hostttype].auto_volumes,
               'release': release_version
               }]) -%}
           {%- endfor %}
diff --git a/create/tasks/aws.yml b/create/tasks/aws.yml
index af67d840..c18ee348 100644
--- a/create/tasks/aws.yml
+++ b/create/tasks/aws.yml
@@ -43,7 +43,7 @@
           owner: "{{ lookup('env','USER')| lower }}"
           maintenance_mode: "{%- if prometheus_set_unset_maintenance_mode|bool -%}true{%- else -%}false{%- endif -%}"
           termination_protection: "{{cluster_vars[buildenv].termination_protection}}"
-      volumes: "{{cluster_vars[buildenv].hosttype_vars[item.hosttype].auto_volumes | default([])}}"
+      volumes: "{{ item.auto_volumes | default([]) }}"
       count_tag:
         Name: "{{item.hostname}}"
       exact_count: 1

From ea00fa093afc930365e99210bd5ccee1ff986625 Mon Sep 17 00:00:00 2001
From: Mike Krasnianskyi
Date: Wed, 25 Mar 2020 16:06:10 +0000
Subject: [PATCH 6/6] Fix aws credentials

---
 cluster_hosts/tasks/aws.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/cluster_hosts/tasks/aws.yml b/cluster_hosts/tasks/aws.yml
index 8c536322..b9485c43 100644
--- a/cluster_hosts/tasks/aws.yml
+++ b/cluster_hosts/tasks/aws.yml
@@ -149,11 +149,11 @@
 - block:
   - name: Get snapshots info
     ec2_snapshot_info:
-      aws_access_key: "{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
-      aws_secret_key: "{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
+      aws_access_key: "{{cluster_vars[buildenv].aws_access_key}}"
+      aws_secret_key: "{{cluster_vars[buildenv].aws_secret_key}}"
       region: "{{ cluster_vars.region }}"
       filters: "{{ _snapshot_tags[0] }}"
-    register: r_ebs_snapshots
+    register: r__ebs_snapshots

   - name: Assert that snapshots exist
     assert:
@@ -163,7 +163,7 @@
       fail_msg: "{{ item }} not in available snapshots {{ _available_snapshots }}"
     loop: "{{ _configuration_snapshots }}"
     vars:
-      _available_snapshots: "{{ r_ebs_snapshots.snapshots|json_query('[].tags.backup_id')|unique }}"
+      _available_snapshots: "{{ r__ebs_snapshots.snapshots|json_query('[].tags.backup_id')|unique }}"
       _configuration_snapshots: "{{ cluster_vars[buildenv].hosttype_vars|json_query('*.auto_volumes[].snapshot_tags.*[]')|unique }}"

   - name: update cluster_hosts_flat with snapshot_id
@@ -172,7 +172,7 @@
         {%- for host in cluster_hosts_flat -%}
           {%- set cluster_host_topology = host.hostname | regex_replace('^.*(-.*?-).*$', '\\1') -%}
           {%- for vol in host.auto_volumes -%}
-            {%- set cur_snapshot = r_ebs_snapshots | default([]) | to_json | from_json | json_query('snapshots[?contains(tags.Name, \'' + cluster_host_topology + '\')]') -%}
+            {%- set cur_snapshot = r__ebs_snapshots | default([]) | to_json | from_json | json_query('snapshots[?contains(tags.Name, \'' + cluster_host_topology + '\')]') -%}
             {%- if cur_snapshot and 'snapshot_tags' in vol.keys() -%}
               {%- set _dummy = vol.update({'shapshot_id': cur_snapshot[0].snapshot_id}) -%}
               {%- set _dummy = vol.pop('snapshot_tags') -%}
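
PATCH 5 and PATCH 6 replace the per-node tags.node lookup of the earlier revisions with a coarser match on the snapshot's Name tag: regex_replace('^.*(-.*?-).*$', '\\1') reduces the hostname to its '-<hosttype>-' token, and the first snapshot whose Name tag contains that token supplies the snapshot ID for every tagged volume of every node of that hosttype. An illustrative check of what the regex extracts (hostname invented):

    - debug:
        msg: "{{ 'mycluster-dev-cassandra-a0' | regex_replace('^.*(-.*?-).*$', '\\1') }}"   # prints "-cassandra-"

Because the node index no longer takes part in the match, all nodes of a hosttype are restored from the same (first matching) snapshot, whereas the tags.node scheme of PATCH 1 to PATCH 3 selected a distinct snapshot per node index.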