From af6a28f177a3b782dade6cb0e88ffa6717f4addd Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Mon, 24 Jan 2022 16:31:40 -0800 Subject: [PATCH 01/31] Add ec2_lc* integration tests (#824) Add ec2_lc* integration tests SUMMARY Added integration tests which exercise ec2_lc, ec2_lc_find, and ec2_lc_info. ISSUE TYPE Feature Pull Request ADDITIONAL INFORMATION ec2_lc (launch configurations) work with ASGs (auto-scaling groups) to define launch config for instances in the ASG. We have tests for ec2_asg that make use of ec2_lc but as it is slow already, it makes sense to have a dedicated test suite for ec2_lc. Reviewed-by: Alina Buzachis Reviewed-by: Mandar Kulkarni Reviewed-by: Jill R Reviewed-by: Markus Bergholz --- plugins/modules/ec2_lc.py | 2 +- tests/integration/targets/ec2_lc/aliases | 4 + .../targets/ec2_lc/defaults/main.yml | 7 + .../integration/targets/ec2_lc/meta/main.yml | 7 + .../targets/ec2_lc/tasks/env_cleanup.yml | 94 +++++++++ .../targets/ec2_lc/tasks/env_setup.yml | 64 ++++++ .../integration/targets/ec2_lc/tasks/main.yml | 192 ++++++++++++++++++ .../integration/targets/ec2_lc/vars/main.yml | 1 + 8 files changed, 370 insertions(+), 1 deletion(-) create mode 100644 tests/integration/targets/ec2_lc/aliases create mode 100644 tests/integration/targets/ec2_lc/defaults/main.yml create mode 100644 tests/integration/targets/ec2_lc/meta/main.yml create mode 100644 tests/integration/targets/ec2_lc/tasks/env_cleanup.yml create mode 100644 tests/integration/targets/ec2_lc/tasks/env_setup.yml create mode 100644 tests/integration/targets/ec2_lc/tasks/main.yml create mode 100644 tests/integration/targets/ec2_lc/vars/main.yml diff --git a/plugins/modules/ec2_lc.py b/plugins/modules/ec2_lc.py index 2cdf0463863..19f8dfe2972 100644 --- a/plugins/modules/ec2_lc.py +++ b/plugins/modules/ec2_lc.py @@ -478,7 +478,7 @@ def create_block_device_meta(module, volume): if 'no_device' in volume: return_object['NoDevice'] = volume.get('no_device') - if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'ips', 'encrypted']): + if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'encrypted']): return_object['Ebs'] = {} if 'snapshot' in volume: diff --git a/tests/integration/targets/ec2_lc/aliases b/tests/integration/targets/ec2_lc/aliases new file mode 100644 index 00000000000..df93245119a --- /dev/null +++ b/tests/integration/targets/ec2_lc/aliases @@ -0,0 +1,4 @@ +cloud/aws + +ec2_lc_info +ec2_lc_find diff --git a/tests/integration/targets/ec2_lc/defaults/main.yml b/tests/integration/targets/ec2_lc/defaults/main.yml new file mode 100644 index 00000000000..fbbeb54fdac --- /dev/null +++ b/tests/integration/targets/ec2_lc/defaults/main.yml @@ -0,0 +1,7 @@ +--- +# defaults file for ec2_instance +ec2_instance_name: '{{ resource_prefix }}-node' +ec2_instance_owner: 'integration-run-{{ resource_prefix }}' +ec2_instance_type: t2.micro +ec2_ami_name: "amzn-ami-hvm*" +alarm_prefix: "ansible-test" diff --git a/tests/integration/targets/ec2_lc/meta/main.yml b/tests/integration/targets/ec2_lc/meta/main.yml new file mode 100644 index 00000000000..e3fef1b994a --- /dev/null +++ b/tests/integration/targets/ec2_lc/meta/main.yml @@ -0,0 +1,7 @@ +dependencies: + - prepare_tests + - setup_ec2 + - setup_ec2_facts + - role: setup_botocore_pip + vars: + boto3_version: "1.17.86" diff --git a/tests/integration/targets/ec2_lc/tasks/env_cleanup.yml b/tests/integration/targets/ec2_lc/tasks/env_cleanup.yml new file mode 100644 index 
00000000000..9e5ae6a9380 --- /dev/null +++ b/tests/integration/targets/ec2_lc/tasks/env_cleanup.yml @@ -0,0 +1,94 @@ +- name: remove any instances in the test VPC + ec2_instance: + filters: + vpc_id: "{{ testing_vpc.vpc.id }}" + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: Get ENIs + ec2_eni_info: + filters: + vpc-id: "{{ testing_vpc.vpc.id }}" + register: enis + +- name: delete all ENIs + ec2_eni: + eni_id: "{{ item.id }}" + state: absent + until: removed is not failed + with_items: "{{ enis.network_interfaces }}" + ignore_errors: yes + retries: 10 + +- name: remove the security group + ec2_group: + name: "{{ resource_prefix }}-sg" + description: a security group for ansible tests + vpc_id: "{{ testing_vpc.vpc.id }}" + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove routing rules + ec2_vpc_route_table: + state: absent + vpc_id: "{{ testing_vpc.vpc.id }}" + tags: + created: "{{ resource_prefix }}-route" + routes: + - dest: 0.0.0.0/0 + gateway_id: "{{ igw.gateway_id }}" + subnets: + - "{{ testing_subnet_a.subnet.id }}" + - "{{ testing_subnet_b.subnet.id }}" + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove internet gateway + ec2_vpc_igw: + vpc_id: "{{ testing_vpc.vpc.id }}" + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove subnet A + ec2_vpc_subnet: + state: absent + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: 10.22.32.0/24 + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove subnet B + ec2_vpc_subnet: + state: absent + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: 10.22.33.0/24 + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove the VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: 10.22.32.0/23 + state: absent + tags: + Name: Ansible Testing VPC + tenancy: default + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 diff --git a/tests/integration/targets/ec2_lc/tasks/env_setup.yml b/tests/integration/targets/ec2_lc/tasks/env_setup.yml new file mode 100644 index 00000000000..88f5bb6fe22 --- /dev/null +++ b/tests/integration/targets/ec2_lc/tasks/env_setup.yml @@ -0,0 +1,64 @@ +- name: Create VPC for use in testing + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: 10.22.32.0/23 + tags: + Name: Ansible ec2_lc Testing VPC + tenancy: default + register: testing_vpc + +- name: Create internet gateway for use in testing + ec2_vpc_igw: + vpc_id: "{{ testing_vpc.vpc.id }}" + state: present + tags: + Name: Ansible ec2_lc Testing gateway + register: igw + +- name: Create default subnet in zone A + ec2_vpc_subnet: + state: present + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: 10.22.32.0/24 + az: "{{ aws_region }}a" + resource_tags: + Name: "{{ resource_prefix }}-subnet-a" + register: testing_subnet_a + +- name: Create secondary subnet in zone B + ec2_vpc_subnet: + state: present + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: 10.22.33.0/24 + az: "{{ aws_region }}b" + resource_tags: + Name: "{{ resource_prefix }}-subnet-b" + register: testing_subnet_b + +- name: create routing rules + ec2_vpc_route_table: + vpc_id: "{{ testing_vpc.vpc.id }}" + tags: + created: "{{ resource_prefix }}-route" + routes: + - dest: 0.0.0.0/0 + gateway_id: "{{ igw.gateway_id }}" + subnets: + - "{{ 
testing_subnet_a.subnet.id }}" + - "{{ testing_subnet_b.subnet.id }}" + +- name: create a security group with the vpc + ec2_group: + name: "{{ resource_prefix }}-sg" + description: a security group for ansible tests + vpc_id: "{{ testing_vpc.vpc.id }}" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + register: sg diff --git a/tests/integration/targets/ec2_lc/tasks/main.yml b/tests/integration/targets/ec2_lc/tasks/main.yml new file mode 100644 index 00000000000..b8c255f4481 --- /dev/null +++ b/tests/integration/targets/ec2_lc/tasks/main.yml @@ -0,0 +1,192 @@ +- name: run ec2_lc tests + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + + block: + + - name: set up environment for testing. + include_tasks: env_setup.yml + + - name: Create launch configuration 1 + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc1' + image_id: '{{ ec2_ami_id }}' + assign_public_ip: yes + instance_type: '{{ ec2_instance_type }}' + security_groups: '{{ sg.group_id }}' + volumes: + - device_name: /dev/xvda + volume_size: 10 + volume_type: gp2 + delete_on_termination: true + register: lc_1_create + + - name: Gather information about launch configuration 1 + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + community.aws.ec2_lc_info: + name: '{{ resource_prefix }}-lc1' + register: lc_1_info_result + + - assert: + that: + - lc_1_create is changed + - '"autoscaling:CreateLaunchConfiguration" in lc_1_create.resource_actions' + - '"throughput" not in lc_1_info_result.launch_configurations[0].block_device_mappings[0].ebs' + - lc_1_info_result.launch_configurations[0].block_device_mappings[0].ebs.volume_size == 10 + - lc_1_info_result.launch_configurations[0].block_device_mappings[0].ebs.volume_type == 'gp2' + - lc_1_info_result.launch_configurations[0].instance_type == 't2.micro' + + - name: Create launch configuration 1 - Idempotency + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc1' + image_id: '{{ ec2_ami_id }}' + assign_public_ip: yes + instance_type: '{{ ec2_instance_type }}' + security_groups: '{{ sg.group_id }}' + volumes: + - device_name: /dev/xvda + volume_size: 10 + volume_type: gp2 + delete_on_termination: true + register: lc_1_create_idem + + - assert: + that: + - lc_1_create_idem is not changed + - '"autoscaling:CreateLaunchConfiguration" not in lc_1_create_idem.resource_actions' + + - name: Create launch configuration 2 + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc2' + image_id: '{{ ec2_ami_id }}' + assign_public_ip: yes + instance_type: 't3.small' + security_groups: '{{ sg.group_id }}' + volumes: + - device_name: /dev/xvda + volume_size: 10 + volume_type: gp2 + delete_on_termination: true + register: lc_2_create + + - name: Gather information about launch configuration 2 + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + community.aws.ec2_lc_info: + name: '{{ resource_prefix }}-lc2' + register: lc_2_info_result + + - assert: + that: + - lc_2_create is changed + - '"autoscaling:CreateLaunchConfiguration" in lc_2_create.resource_actions' + - '"throughput" not in lc_2_info_result.launch_configurations[0].block_device_mappings[0].ebs' + - lc_2_info_result.launch_configurations[0].block_device_mappings[0].ebs.volume_size == 10 + - 
lc_2_info_result.launch_configurations[0].block_device_mappings[0].ebs.volume_type == 'gp2' + - lc_2_info_result.launch_configurations[0].instance_type == 't3.small' + - '"autoscaling:CreateLaunchConfiguration" in lc_2_create.resource_actions' + + - name: Create launch configuration 2 - Idempotency + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc2' + image_id: '{{ ec2_ami_id }}' + assign_public_ip: yes + instance_type: '{{ ec2_instance_type }}' + security_groups: '{{ sg.group_id }}' + volumes: + - device_name: /dev/xvda + volume_size: 10 + volume_type: gp2 + delete_on_termination: true + register: lc_2_create_idem + + - assert: + that: + - lc_2_create_idem is not changed + - '"autoscaling:CreateLaunchConfiguration" not in lc_2_create_idem.resource_actions' + + - name: Search for the Launch Configurations that start with test resource_prefix + community.aws.ec2_lc_find: + name_regex: '{{ resource_prefix }}*' + sort_order: descending + register: lc_find_result + + - assert: + that: + - lc_find_result.results | length == 2 + - '"autoscaling:DescribeLaunchConfigurations" in lc_find_result.resource_actions' + + - name: Delete launch configuration 1 + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc1' + state: absent + register: lc_1_delete + + - assert: + that: + - lc_1_delete is changed + - '"autoscaling:DeleteLaunchConfiguration" in lc_1_delete.resource_actions' + + - name: Delete launch configuration 1 - Idempotency + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc1' + state: absent + register: lc_1_delete_idem + + - assert: + that: + - lc_1_delete_idem is not changed + - '"autoscaling:DeleteLaunchConfiguration" not in lc_1_delete_idem.resource_actions' + + - name: Gather information about launch configuration 1 + community.aws.ec2_lc_info: + name: '{{ resource_prefix }}-lc1' + register: lc_1_info_result + + - assert: + that: + - lc_1_info_result is not changed + - lc_1_info_result.launch_configurations | length == 0 + + - name: Delete launch configuration 2 + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc2' + state: absent + register: lc_2_delete + + - assert: + that: + - lc_2_delete is changed + - '"autoscaling:DeleteLaunchConfiguration" in lc_2_delete.resource_actions' + + - name: Delete launch configuration 2 - Idempotency + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc2' + state: absent + register: lc_2_delete_idem + + - assert: + that: + - lc_2_delete_idem is not changed + - '"autoscaling:DeleteLaunchConfiguration" not in lc_2_delete_idem.resource_actions' + + - name: Gather information about launch configuration 2 + community.aws.ec2_lc_info: + name: '{{ resource_prefix }}-lc2' + register: lc_2_info_result + + - assert: + that: + - lc_2_info_result is not changed + - lc_2_info_result.launch_configurations | length == 0 + + always: + + - include_tasks: env_cleanup.yml diff --git a/tests/integration/targets/ec2_lc/vars/main.yml b/tests/integration/targets/ec2_lc/vars/main.yml new file mode 100644 index 00000000000..ed97d539c09 --- /dev/null +++ b/tests/integration/targets/ec2_lc/vars/main.yml @@ -0,0 +1 @@ +--- From 91101624f5c21658d1e36d09f4beb474363bf661 Mon Sep 17 00:00:00 2001 From: Yuri Krysko Date: Tue, 25 Jan 2022 08:05:22 -0500 Subject: [PATCH 02/31] Add ability to manage resource policy for AWS Secrets Manager secrets (#843) Add ability to manage resource policy for AWS Secrets Manager secrets SUMMARY AWS Secrets Manager secrets support attaching resource policy. 
The benefit is huge when necessary to access secrets from other AWS accounts. This pull request adds ability to manage (add new/remove or modify existing) secrets resource policy. ISSUE TYPE Feature Pull Request COMPONENT NAME module: aws_secret ADDITIONAL INFORMATION Reviewed-by: Mark Woolley Reviewed-by: Yuri Krysko Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- ...add_aws_secret_resource_policy_support.yml | 2 + plugins/modules/aws_secret.py | 78 +++++++++++++++++-- .../targets/aws_secret/tasks/basic.yml | 50 ++++++++++++ .../aws_secret/templates/secret-policy.j2 | 11 +++ 4 files changed, 136 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/843-add_aws_secret_resource_policy_support.yml create mode 100644 tests/integration/targets/aws_secret/templates/secret-policy.j2 diff --git a/changelogs/fragments/843-add_aws_secret_resource_policy_support.yml b/changelogs/fragments/843-add_aws_secret_resource_policy_support.yml new file mode 100644 index 00000000000..c970a72b8a1 --- /dev/null +++ b/changelogs/fragments/843-add_aws_secret_resource_policy_support.yml @@ -0,0 +1,2 @@ +minor_changes: +- aws_secret - Add ``resource_policy`` parameter (https://github.com/ansible-collections/community.aws/pull/843). \ No newline at end of file diff --git a/plugins/modules/aws_secret.py b/plugins/modules/aws_secret.py index dfe1013194d..050b00f5ae8 100644 --- a/plugins/modules/aws_secret.py +++ b/plugins/modules/aws_secret.py @@ -6,7 +6,6 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type - DOCUMENTATION = r''' --- module: aws_secret @@ -54,6 +53,13 @@ - Specifies string or binary data that you want to encrypt and store in the new version of the secret. default: "" type: str + resource_policy: + description: + - Specifies JSON-formatted resource policy to attach to the secret. Useful when granting cross-account access + to secrets. + required: false + type: json + version_added: 3.1.0 tags: description: - Specifies a list of user-defined tags that are attached to the secret. 
@@ -73,7 +79,6 @@ ''' - EXAMPLES = r''' - name: Add string to AWS Secrets Manager community.aws.aws_secret: @@ -82,6 +87,14 @@ secret_type: 'string' secret: "{{ super_secret_string }}" +- name: Add a secret with resource policy attached + community.aws.aws_secret: + name: 'test_secret_string' + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}" + resource_policy: "{{ lookup('template', 'templates/resource_policy.json.j2', convert_data=False) | string }}" + - name: remove string from AWS Secrets Manager community.aws.aws_secret: name: 'test_secret_string' @@ -90,7 +103,6 @@ secret: "{{ super_secret_string }}" ''' - RETURN = r''' secret: description: The secret information @@ -133,6 +145,9 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict, compare_aws_tags, ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from traceback import format_exc +import json try: from botocore.exceptions import BotoCoreError, ClientError @@ -142,7 +157,7 @@ class Secret(object): """An object representation of the Secret described by the self.module args""" - def __init__(self, name, secret_type, secret, description="", kms_key_id=None, + def __init__(self, name, secret_type, secret, resource_policy=None, description="", kms_key_id=None, tags=None, lambda_arn=None, rotation_interval=None): self.name = name self.description = description @@ -152,6 +167,7 @@ def __init__(self, name, secret_type, secret, description="", kms_key_id=None, else: self.secret_type = "SecretString" self.secret = secret + self.resource_policy = resource_policy self.tags = tags or {} self.rotation_enabled = False if lambda_arn: @@ -185,6 +201,15 @@ def update_args(self): args[self.secret_type] = self.secret return args + @property + def secret_resource_policy_args(self): + args = { + "SecretId": self.name + } + if self.resource_policy: + args["ResourcePolicy"] = self.resource_policy + return args + @property def boto3_tags(self): return ansible_dict_to_boto3_tag_list(self.Tags) @@ -211,6 +236,15 @@ def get_secret(self, name): self.module.fail_json_aws(e, msg="Failed to describe secret") return secret + def get_resource_policy(self, name): + try: + resource_policy = self.client.get_resource_policy(SecretId=name) + except self.client.exceptions.ResourceNotFoundException: + resource_policy = None + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to get secret resource policy") + return resource_policy + def create_secret(self, secret): if self.module.check_mode: self.module.exit_json(changed=True) @@ -227,13 +261,26 @@ def create_secret(self, secret): def update_secret(self, secret): if self.module.check_mode: self.module.exit_json(changed=True) - try: response = self.client.update_secret(**secret.update_args) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e, msg="Failed to update secret") return response + def put_resource_policy(self, secret): + if self.module.check_mode: + self.module.exit_json(changed=True) + try: + json.loads(secret.secret_resource_policy_args.get("ResourcePolicy")) + except (TypeError, ValueError) as e: + self.module.fail_json(msg="Failed to parse resource policy as JSON: %s" % (str(e)), exception=format_exc()) + + try: + 
response = self.client.put_resource_policy(**secret.secret_resource_policy_args) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to update secret resource policy") + return response + def restore_secret(self, name): if self.module.check_mode: self.module.exit_json(changed=True) @@ -255,6 +302,15 @@ def delete_secret(self, name, recovery_window): self.module.fail_json_aws(e, msg="Failed to delete secret") return response + def delete_resource_policy(self, name): + if self.module.check_mode: + self.module.exit_json(changed=True) + try: + response = self.client.delete_resource_policy(SecretId=name) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to delete secret resource policy") + return response + def update_rotation(self, secret): if secret.rotation_enabled: try: @@ -334,6 +390,7 @@ def main(): 'kms_key_id': dict(), 'secret_type': dict(choices=['binary', 'string'], default="string"), 'secret': dict(default="", no_log=True), + 'resource_policy': dict(type='json', default=None), 'tags': dict(type='dict', default={}), 'rotation_lambda': dict(), 'rotation_interval': dict(type='int', default=30), @@ -352,6 +409,7 @@ def main(): module.params.get('secret'), description=module.params.get('description'), kms_key_id=module.params.get('kms_key_id'), + resource_policy=module.params.get('resource_policy'), tags=module.params.get('tags'), lambda_arn=module.params.get('rotation_lambda'), rotation_interval=module.params.get('rotation_interval') @@ -374,6 +432,8 @@ def main(): if state == 'present': if current_secret is None: result = secrets_mgr.create_secret(secret) + if secret.resource_policy and result.get("ARN"): + result = secrets_mgr.put_resource_policy(secret) changed = True else: if current_secret.get("DeletedDate"): @@ -385,6 +445,14 @@ def main(): if not rotation_match(secret, current_secret): result = secrets_mgr.update_rotation(secret) changed = True + current_resource_policy_response = secrets_mgr.get_resource_policy(secret.name) + current_resource_policy = current_resource_policy_response.get("ResourcePolicy") + if compare_policies(secret.resource_policy, current_resource_policy): + if secret.resource_policy is None and current_resource_policy: + result = secrets_mgr.delete_resource_policy(secret.name) + else: + result = secrets_mgr.put_resource_policy(secret) + changed = True current_tags = boto3_tag_list_to_ansible_dict(current_secret.get('Tags', [])) tags_to_add, tags_to_remove = compare_aws_tags(current_tags, secret.tags) if tags_to_add: diff --git a/tests/integration/targets/aws_secret/tasks/basic.yml b/tests/integration/targets/aws_secret/tasks/basic.yml index 884fdc40d36..ffe5314c148 100644 --- a/tests/integration/targets/aws_secret/tasks/basic.yml +++ b/tests/integration/targets/aws_secret/tasks/basic.yml @@ -1,5 +1,12 @@ --- - block: + # ============================================================ + # Preparation + # ============================================================ + - name: 'Retrieve caller facts' + aws_caller_info: + register: aws_caller_info + # ============================================================ # Module parameter testing # ============================================================ @@ -101,6 +108,49 @@ that: - result.changed + - name: add resource policy to secret + aws_secret: + name: "{{ secret_name }}" + description: 'this is a change to this secret' + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}" + resource_policy: "{{ lookup('template', 
'secret-policy.j2', convert_data=False) | string }}" + register: result + + - name: assert correct keys are returned + assert: + that: + - result.changed + + - name: remove existing resource policy from secret + aws_secret: + name: "{{ secret_name }}" + description: 'this is a change to this secret' + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}" + register: result + + - name: assert correct keys are returned + assert: + that: + - result.changed + + - name: remove resource policy from secret (idempotency) + aws_secret: + name: "{{ secret_name }}" + description: 'this is a change to this secret' + state: present + secret_type: 'string' + secret: "{{ super_secret_string }}" + register: result + + - name: assert no change happened + assert: + that: + - not result.changed + - name: remove secret aws_secret: name: "{{ secret_name }}" diff --git a/tests/integration/targets/aws_secret/templates/secret-policy.j2 b/tests/integration/targets/aws_secret/templates/secret-policy.j2 new file mode 100644 index 00000000000..77438091b25 --- /dev/null +++ b/tests/integration/targets/aws_secret/templates/secret-policy.j2 @@ -0,0 +1,11 @@ +{ + "Version" : "2012-10-17", + "Statement" : [ { + "Effect" : "Allow", + "Principal" : { + "AWS" : "arn:aws:iam::{{ aws_caller_info.account }}:root" + }, + "Action" : "secretsmanager:*", + "Resource" : "*" + } ] +} \ No newline at end of file From dc26f89a04dc3240cde48e3611c7e522180852a4 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Tue, 25 Jan 2022 13:36:58 -0800 Subject: [PATCH 03/31] ec2_lc: add volume throughput parameter support (#790) ec2_lc: add volume throughput parameter support SUMMARY Adding throughput parameter support to ec2_lc. Fixes #784. ISSUE TYPE Feature Pull Request COMPONENT NAME community.aws.ec2_lc UPDATE: Integration tests being added in a separate PR: #824 Reviewed-by: Alina Buzachis Reviewed-by: Jill R --- ...90-ec2_lc-add-throughput-param-support.yml | 2 + plugins/modules/ec2_lc.py | 13 ++- .../integration/targets/ec2_lc/tasks/main.yml | 86 ++++++++++++++++++- 3 files changed, 99 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/790-ec2_lc-add-throughput-param-support.yml diff --git a/changelogs/fragments/790-ec2_lc-add-throughput-param-support.yml b/changelogs/fragments/790-ec2_lc-add-throughput-param-support.yml new file mode 100644 index 00000000000..aa7e3e09803 --- /dev/null +++ b/changelogs/fragments/790-ec2_lc-add-throughput-param-support.yml @@ -0,0 +1,2 @@ +minor_changes: +- ec2_lc - add support for throughput parameter (https://github.com/ansible-collections/community.aws/pull/790). diff --git a/plugins/modules/ec2_lc.py b/plugins/modules/ec2_lc.py index 19f8dfe2972..de3a7a5443f 100644 --- a/plugins/modules/ec2_lc.py +++ b/plugins/modules/ec2_lc.py @@ -107,6 +107,12 @@ description: - The number of IOPS per second to provision for the volume. - Required when I(volume_type=io1). + throughput: + type: int + description: + - The throughput to provision for a gp3 volume. + - Valid Range is a minimum value of 125 and a maximum value of 1000. 
+ version_added: 3.1.0 encrypted: type: bool default: false @@ -478,7 +484,7 @@ def create_block_device_meta(module, volume): if 'no_device' in volume: return_object['NoDevice'] = volume.get('no_device') - if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'encrypted']): + if any(key in volume for key in ['snapshot', 'volume_size', 'volume_type', 'delete_on_termination', 'iops', 'throughput', 'encrypted']): return_object['Ebs'] = {} if 'snapshot' in volume: @@ -496,6 +502,11 @@ def create_block_device_meta(module, volume): if 'iops' in volume: return_object['Ebs']['Iops'] = volume.get('iops') + if 'throughput' in volume: + if volume.get('volume_type') != 'gp3': + module.fail_json(msg='The throughput parameter is supported only for GP3 volumes.') + return_object['Ebs']['Throughput'] = volume.get('throughput') + if 'encrypted' in volume: return_object['Ebs']['Encrypted'] = volume.get('encrypted') diff --git a/tests/integration/targets/ec2_lc/tasks/main.yml b/tests/integration/targets/ec2_lc/tasks/main.yml index b8c255f4481..e61fc6feb5b 100644 --- a/tests/integration/targets/ec2_lc/tasks/main.yml +++ b/tests/integration/targets/ec2_lc/tasks/main.yml @@ -112,6 +112,58 @@ - lc_2_create_idem is not changed - '"autoscaling:CreateLaunchConfiguration" not in lc_2_create_idem.resource_actions' + - name: Create launch configuration 3 - test throughput parameter + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc3' + image_id: '{{ ec2_ami_id }}' + instance_type: '{{ ec2_instance_type }}' + volumes: + - device_name: /dev/sda1 + volume_size: 10 + volume_type: gp3 + throughput: 250 + delete_on_termination: true + register: lc_3_create + + - name: Gather information about launch configuration 3 + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + community.aws.ec2_lc_info: + name: '{{ resource_prefix }}-lc3' + register: lc_3_info_result + + - assert: + that: + - lc_3_create is changed + - '"throughput" in lc_3_info_result.launch_configurations[0].block_device_mappings[0].ebs' + - lc_3_info_result.launch_configurations[0].block_device_mappings[0].ebs.throughput == 250 + - lc_3_info_result.launch_configurations[0].block_device_mappings[0].ebs.volume_size == 10 + - lc_3_info_result.launch_configurations[0].block_device_mappings[0].ebs.volume_type == 'gp3' + - lc_3_info_result.launch_configurations[0].instance_type == 't2.micro' + - '"autoscaling:CreateLaunchConfiguration" in lc_3_create.resource_actions' + + - name: Create launch configuration 3 - Idempotency + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc3' + image_id: '{{ ec2_ami_id }}' + instance_type: '{{ ec2_instance_type }}' + volumes: + - device_name: /dev/sda1 + volume_size: 10 + volume_type: gp3 + throughput: 250 + delete_on_termination: true + register: lc_3_create_idem + + - assert: + that: + - lc_3_create_idem is not changed + - '"autoscaling:CreateLaunchConfiguration" not in lc_3_create_idem.resource_actions' + - name: Search for the Launch Configurations that start with test resource_prefix community.aws.ec2_lc_find: name_regex: '{{ resource_prefix }}*' @@ -120,7 +172,7 @@ - assert: that: - - lc_find_result.results | length == 2 + - lc_find_result.results | length == 3 - '"autoscaling:DescribeLaunchConfigurations" in lc_find_result.resource_actions' - name: Delete launch configuration 1 @@ 
-187,6 +239,38 @@ - lc_2_info_result is not changed - lc_2_info_result.launch_configurations | length == 0 + - name: Delete launch configuration 3 + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc3' + state: absent + register: lc_3_delete + + - assert: + that: + - lc_3_delete is changed + - '"autoscaling:DeleteLaunchConfiguration" in lc_3_delete.resource_actions' + + - name: Delete launch configuration 3 - Idempotency + community.aws.ec2_lc: + name: '{{ resource_prefix }}-lc3' + state: absent + register: lc_3_delete_idem + + - assert: + that: + - lc_3_delete_idem is not changed + - '"autoscaling:DeleteLaunchConfiguration" not in lc_3_delete_idem.resource_actions' + + - name: Gather information about launch configuration 3 + community.aws.ec2_lc_info: + name: '{{ resource_prefix }}-lc2' + register: lc_3_info_result + + - assert: + that: + - lc_3_info_result is not changed + - lc_3_info_result.launch_configurations | length == 0 + always: - include_tasks: env_cleanup.yml From 99c64a6e9993aed6a825798079a2b964904beb72 Mon Sep 17 00:00:00 2001 From: Hugh Saunders Date: Wed, 26 Jan 2022 11:39:09 +0000 Subject: [PATCH 04/31] Respect wait parameter in elb_instance when adding/removing instances (#826) Respect wait parameter in elb_instance when adding/removing instances SUMMARY The wait parameter is currently ignored when registering or deregistering an instance with an ELB. Looks like this was lost in the boto3 migration: 96f1518 Related: #825 ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_instance ADDITIONAL INFORMATION See #825 Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- changelogs/fragments/825-fix-elb-wait.yml | 2 ++ plugins/modules/elb_instance.py | 10 ++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/825-fix-elb-wait.yml diff --git a/changelogs/fragments/825-fix-elb-wait.yml b/changelogs/fragments/825-fix-elb-wait.yml new file mode 100644 index 00000000000..afc87a06c56 --- /dev/null +++ b/changelogs/fragments/825-fix-elb-wait.yml @@ -0,0 +1,2 @@ +minor_changes: + - elb_instance - `wait` parameter is no longer ignored (https://github.com/ansible-collections/community.aws/pull/826) diff --git a/plugins/modules/elb_instance.py b/plugins/modules/elb_instance.py index 6116207866b..51ec03d5702 100644 --- a/plugins/modules/elb_instance.py +++ b/plugins/modules/elb_instance.py @@ -144,8 +144,9 @@ def deregister(self, wait, timeout): # already OutOfService is being deregistered. self.changed = True - for lb in self.lbs: - self._await_elb_instance_state(lb, 'Deregistered', timeout) + if wait: + for lb in self.lbs: + self._await_elb_instance_state(lb, 'Deregistered', timeout) def register(self, wait, enable_availability_zone, timeout): """Register the instance for all ELBs and wait for the ELB @@ -176,8 +177,9 @@ def register(self, wait, enable_availability_zone, timeout): self.changed = True - for lb in self.lbs: - self._await_elb_instance_state(lb, 'InService', timeout) + if wait: + for lb in self.lbs: + self._await_elb_instance_state(lb, 'InService', timeout) @AWSRetry.jittered_backoff() def _describe_elbs(self, **params): From 5e5f754736f4e851c446a1261cf3a1fd4b51f7b9 Mon Sep 17 00:00:00 2001 From: Sebastien Rosset Date: Fri, 28 Jan 2022 01:40:09 -0800 Subject: [PATCH 05/31] migrate from ansible.netcommon to ansible.utils (#882) migrate from ansible.netcommon to ansible.utils SUMMARY This is a maintenance task to migrate from ansible.netcommon to ansible.utils. 
I was trying to fix an issue in that module, but I was told the functions have moved to ansible.utils. See ansible-collections/ansible.netcommon#362 (comment) ISSUE TYPE Bugfix Pull Request COMPONENT NAME Existing integration tests that use the ansible.netcommon module. No module uses netcommon, only integration tests. ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- test-requirements.txt | 2 +- tests/integration/requirements.txt | 2 +- .../targets/ec2_eip/tasks/main.yml | 30 +++++++++---------- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/test-requirements.txt b/test-requirements.txt index d809cdbfa75..41566c53e71 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,7 +10,7 @@ mock pytest-xdist # We should avoid these two modules with py3 pytest-mock -# Needed for ansible.netcommon.ipaddr in tests +# Needed for ansible.utils.ipaddr in tests netaddr # Sometimes needed where we don't have features we need in modules awscli diff --git a/tests/integration/requirements.txt b/tests/integration/requirements.txt index 70f48bcf09f..387e8069351 100644 --- a/tests/integration/requirements.txt +++ b/tests/integration/requirements.txt @@ -3,7 +3,7 @@ boto boto3 botocore -# netaddr is needed for ansible.netcommon.ipv6 +# netaddr is needed for ansible.utils.ipv6 netaddr virtualenv # Sometimes needed where we don't have features we need in modules diff --git a/tests/integration/targets/ec2_eip/tasks/main.yml b/tests/integration/targets/ec2_eip/tasks/main.yml index 48db1d1048a..66e2eb5b4ba 100644 --- a/tests/integration/targets/ec2_eip/tasks/main.yml +++ b/tests/integration/targets/ec2_eip/tasks/main.yml @@ -108,7 +108,7 @@ that: - eip is defined - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr ) + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) @@ -160,7 +160,7 @@ that: - eip is defined - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr ) + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) @@ -175,7 +175,7 @@ that: - reallocate_eip is defined - reallocate_eip is not changed - - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.netcommon.ipaddr ) + - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr ) - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) @@ -202,7 +202,7 @@ that: - eip is defined - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr ) + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) @@ -217,7 +217,7 @@ that: - reallocate_eip is defined - reallocate_eip is not changed - - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.netcommon.ipaddr ) + - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr ) - 
reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) @@ -244,7 +244,7 @@ that: - eip is defined - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr ) + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) @@ -260,7 +260,7 @@ that: - no_tagged_eip is defined - no_tagged_eip is changed - - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ansible.netcommon.ipaddr ) + - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ansible.utils.ipaddr ) - no_tagged_eip.allocation_id is defined and no_tagged_eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length ) @@ -283,7 +283,7 @@ that: - reallocate_eip is defined - reallocate_eip is not changed - - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.netcommon.ipaddr ) + - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr ) - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length ) @@ -300,7 +300,7 @@ that: - backend_eip is defined - backend_eip is changed - - backend_eip.public_ip is defined and ( backend_eip.public_ip | ansible.netcommon.ipaddr ) + - backend_eip.public_ip is defined and ( backend_eip.public_ip | ansible.utils.ipaddr ) - backend_eip.allocation_id is defined and backend_eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length ) @@ -378,7 +378,7 @@ that: - eip is defined - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr ) + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) @@ -413,7 +413,7 @@ - eip_info.addresses[0].public_ip == eip.public_ip - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id - - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.netcommon.ipaddr ) + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) - eip_info.addresses[0].network_interface_owner_id == caller_info.account - name: Re-Attach EIP to ENI A (no change) @@ -437,7 +437,7 @@ - eip_info.addresses[0].public_ip == eip.public_ip - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id - - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.netcommon.ipaddr ) + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) - name: Attach EIP to ENI B (should fail, already associated) ec2_eip: @@ -459,7 +459,7 @@ - eip_info.addresses[0].public_ip == eip.public_ip - 
eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id - - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.netcommon.ipaddr ) + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) - name: Attach EIP to ENI B ec2_eip: @@ -483,7 +483,7 @@ - eip_info.addresses[0].public_ip == eip.public_ip - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id - - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.netcommon.ipaddr ) + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) - name: Detach EIP from ENI B, without enabling release on disassociation ec2_eip: @@ -667,7 +667,7 @@ that: - eip is defined - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.netcommon.ipaddr ) + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) From ab4bda24245cf103212e0a64bff3cb5b985eb9da Mon Sep 17 00:00:00 2001 From: Andreas Jonsson Date: Sat, 29 Jan 2022 17:11:14 -0800 Subject: [PATCH 06/31] Lambda - Wait before updating (#857) Lambda - Wait before updating SUMMARY Updated lambda module to wait for State = Active & LastUpdateStatus = Successful based on https://aws.amazon.com/blogs/compute/coming-soon-expansion-of-aws-lambda-states-to-all-functions/ Fixes #830 ISSUE TYPE Bugfix Pull Request COMPONENT NAME module: lambda ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- .../fragments/857-lambda-wait-before.yml | 3 +++ plugins/modules/execute_lambda.py | 13 ++++++++++++ plugins/modules/lambda.py | 20 ++++++++++++++++++- 3 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/857-lambda-wait-before.yml diff --git a/changelogs/fragments/857-lambda-wait-before.yml b/changelogs/fragments/857-lambda-wait-before.yml new file mode 100644 index 00000000000..87116dfe916 --- /dev/null +++ b/changelogs/fragments/857-lambda-wait-before.yml @@ -0,0 +1,3 @@ +bugfixes: + - execute_lambda - Wait for Lambda function State = Active before executing (https://github.com/ansible-collections/community.aws/pull/857) + - lambda - Wait for Lambda function State = Active & LastUpdateStatus = Successful before updating (https://github.com/ansible-collections/community.aws/pull/857) diff --git a/plugins/modules/execute_lambda.py b/plugins/modules/execute_lambda.py index 7af644810a8..b4cbb4a53de 100644 --- a/plugins/modules/execute_lambda.py +++ b/plugins/modules/execute_lambda.py @@ -202,6 +202,9 @@ def main(): elif name: invoke_params['FunctionName'] = name + if not module.check_mode: + wait_for_lambda(client, module, name) + try: response = client.invoke(**invoke_params) except is_boto3_error_code('ResourceNotFoundException') as nfe: @@ -255,5 +258,15 @@ def main(): module.exit_json(changed=True, result=results) +def wait_for_lambda(client, module, name): + try: + waiter = client.get_waiter('function_active') + waiter.wait(FunctionName=name) + except 
botocore.exceptions.WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on lambda to be Active') + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed while waiting on lambda to be Active') + + if __name__ == '__main__': main() diff --git a/plugins/modules/lambda.py b/plugins/modules/lambda.py index 1605d6497db..923b1646c3d 100644 --- a/plugins/modules/lambda.py +++ b/plugins/modules/lambda.py @@ -216,7 +216,7 @@ import re try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import ClientError, BotoCoreError, WaiterError except ImportError: pass # protected by AnsibleAWSModule @@ -320,6 +320,18 @@ def set_tag(client, module, tags, function): return changed +def wait_for_lambda(client, module, name): + try: + client_active_waiter = client.get_waiter('function_active') + client_updated_waiter = client.get_waiter('function_updated') + client_active_waiter.wait(FunctionName=name) + client_updated_waiter.wait(FunctionName=name) + except WaiterError as e: + module.fail_json_aws(e, msg='Timeout while waiting on lambda to finish updating') + except (ClientError, BotoCoreError) as e: + module.fail_json_aws(e, msg='Failed while waiting on lambda to finish updating') + + def main(): argument_spec = dict( name=dict(required=True), @@ -453,6 +465,9 @@ def main(): # Upload new configuration if configuration has changed if len(func_kwargs) > 1: + if not check_mode: + wait_for_lambda(client, module, name) + try: if not check_mode: response = client.update_function_configuration(aws_retry=True, **func_kwargs) @@ -494,6 +509,9 @@ def main(): # Upload new code if needed (e.g. code checksum has changed) if len(code_kwargs) > 2: + if not check_mode: + wait_for_lambda(client, module, name) + try: if not check_mode: response = client.update_function_code(aws_retry=True, **code_kwargs) From 017081c914c33a4a9bfaecf66c36ee3463d2fa20 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Sun, 30 Jan 2022 02:16:55 +0100 Subject: [PATCH 07/31] Cleanup unused imports (#896) Cleanup unused imports (#852 / #892) Let's try once more... SUMMARY My local tests are flagging that we've picked up some unused imports again. 
ISSUE TYPE Feature Pull Request COMPONENT NAME aws_glue_job cloudfront_info rds_option_group_info ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- plugins/modules/aws_glue_job.py | 2 -- plugins/modules/cloudfront_info.py | 1 - plugins/modules/rds_option_group_info.py | 1 - 3 files changed, 4 deletions(-) diff --git a/plugins/modules/aws_glue_job.py b/plugins/modules/aws_glue_job.py index edca5d051d5..4e278c81734 100644 --- a/plugins/modules/aws_glue_job.py +++ b/plugins/modules/aws_glue_job.py @@ -245,9 +245,7 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info diff --git a/plugins/modules/cloudfront_info.py b/plugins/modules/cloudfront_info.py index e9136341c9f..b7914dcceb4 100644 --- a/plugins/modules/cloudfront_info.py +++ b/plugins/modules/cloudfront_info.py @@ -241,7 +241,6 @@ type: dict ''' -from functools import partial import traceback try: diff --git a/plugins/modules/rds_option_group_info.py b/plugins/modules/rds_option_group_info.py index b29479386ff..37e848032c8 100644 --- a/plugins/modules/rds_option_group_info.py +++ b/plugins/modules/rds_option_group_info.py @@ -244,7 +244,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags From d52052a6e6798ab970664009593ada991c46d86a Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Sun, 30 Jan 2022 13:05:46 +0100 Subject: [PATCH 08/31] fix yamllint errors (#903) aws_acm: fix yamllint errors in docs section SUMMARY Closes: #902 ISSUE TYPE Docs Pull Request COMPONENT NAME aws_acm ADDITIONAL INFORMATION 1:1 warning missing document start "---" (document-start) 2:81 error line too long (88 > 80 characters) (line-length) 5:81 error line too long (89 > 80 characters) (line-length) 9:81 error line too long (86 > 80 characters) (line-length) 11:81 error line too long (100 > 80 characters) (line-length) 32:81 error line too long (87 > 80 characters) (line-length) 36:81 error line too long (98 > 80 characters) (line-length) 41:81 error line too long (98 > 80 characters) (line-length) 43:81 error line too long (105 > 80 characters) (line-length) 49:81 error line too long (84 > 80 characters) (line-length) 56:81 error line too long (102 > 80 characters) (line-length) 61:81 error line too long (87 > 80 characters) (line-length) 69:81 error line too long (91 > 80 characters) (line-length) 79:81 error line too long (92 > 80 characters) (line-length) 88:81 error line too long (85 > 80 characters) (line-length) 105:81 error line too long (83 > 80 characters) (line-length) 123:1 error 
wrong indentation: expected 2 but found 0 (indentation) 124:17 error no new line character at the end of file (new-line-at-end-of-file) Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell --- plugins/modules/aws_acm.py | 71 ++++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/plugins/modules/aws_acm.py b/plugins/modules/aws_acm.py index 65c95212170..d28301e9160 100644 --- a/plugins/modules/aws_acm.py +++ b/plugins/modules/aws_acm.py @@ -25,18 +25,22 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' +--- module: aws_acm -short_description: Upload and delete certificates in the AWS Certificate Manager service +short_description: > + Upload and delete certificates in the AWS Certificate Manager service version_added: 1.0.0 description: - - Import and delete certificates in Amazon Web Service's Certificate Manager (AWS ACM). + - > + Import and delete certificates in Amazon Web Service's Certificate + Manager (AWS ACM). - > This module does not currently interact with AWS-provided certificates. It currently only manages certificates provided to AWS by the user. - - The ACM API allows users to upload multiple certificates for the same domain name, - and even multiple identical certificates. - This module attempts to restrict such freedoms, to be idempotent, as per the Ansible philosophy. + - The ACM API allows users to upload multiple certificates for the same domain + name, and even multiple identical certificates. This module attempts to + restrict such freedoms, to be idempotent, as per the Ansible philosophy. It does this through applying AWS resource "Name" tags to ACM certificates. - > When I(state=present), @@ -57,63 +61,71 @@ this task will fail. - > When I(state=absent) and I(certificate_arn) is defined, - this module will delete the ACM resource with that ARN if it exists in this region, - and succeed without effect if it doesn't exist. + this module will delete the ACM resource with that ARN if it exists in this + region, and succeed without effect if it doesn't exist. - > - When I(state=absent) and I(domain_name) is defined, - this module will delete all ACM resources in this AWS region with a corresponding domain name. + When I(state=absent) and I(domain_name) is defined, this module will delete + all ACM resources in this AWS region with a corresponding domain name. If there are none, it will succeed without effect. - > When I(state=absent) and I(certificate_arn) is not defined, - and I(domain_name) is not defined, - this module will delete all ACM resources in this AWS region with a corresponding I(Name) tag. + and I(domain_name) is not defined, this module will delete all ACM resources + in this AWS region with a corresponding I(Name) tag. If there are none, it will succeed without effect. - - Note that this may not work properly with keys of size 4096 bits, due to a limitation of the ACM API. + - > + Note that this may not work properly with keys of size 4096 bits, due to a + limitation of the ACM API. options: certificate: description: - The body of the PEM encoded public certificate. - Required when I(state) is not C(absent). - - If your certificate is in a file, use C(lookup('file', 'path/to/cert.pem')). + - > + If your certificate is in a file, + use C(lookup('file', 'path/to/cert.pem')). type: str - certificate_arn: description: - The ARN of a certificate in ACM to delete - Ignored when I(state=present). - - If I(state=absent), you must provide one of I(certificate_arn), I(domain_name) or I(name_tag). 
+ - > + If I(state=absent), you must provide one of + I(certificate_arn), I(domain_name) or I(name_tag). - > If I(state=absent) and no resource exists with this ARN in this region, the task will succeed with no effect. - > - If I(state=absent) and the corresponding resource exists in a different region, - this task may report success without deleting that resource. + If I(state=absent) and the corresponding resource exists in a different + region, this task may report success without deleting that resource. type: str aliases: [arn] - certificate_chain: description: - The body of the PEM encoded chain for your certificate. - - If your certificate chain is in a file, use C(lookup('file', 'path/to/chain.pem')). + - > + If your certificate chain is in a file, + use C(lookup('file', 'path/to/chain.pem')). - Ignored when I(state=absent) type: str - domain_name: description: - The domain name of the certificate. - > If I(state=absent) and I(domain_name) is specified, this task will delete all ACM certificates with this domain. - - Exactly one of I(domain_name), I(name_tag) and I(certificate_arn) must be provided. + - > + Exactly one of I(domain_name), I(name_tag) and I(certificate_arn) + must be provided. - > If I(state=present) this must not be specified. (Since the domain name is encoded within the public certificate's body.) type: str aliases: [domain] - name_tag: description: - - The unique identifier for tagging resources using AWS tags, with key I(Name). + - > + The unique identifier for tagging resources using AWS tags, + with key I(Name). - This can be any set of characters accepted by AWS for tag values. - > This is to ensure Ansible can treat certificates idempotently, @@ -124,15 +136,15 @@ I(certificate_arn), I(domain_name) or I(name_tag). type: str aliases: [name] - private_key: description: - The body of the PEM encoded private key. - Required when I(state=present). - Ignored when I(state=absent). - - If your private key is in a file, use C(lookup('file', 'path/to/key.pem')). + - > + If your private key is in a file, + use C(lookup('file', 'path/to/key.pem')). type: str - state: description: - > @@ -148,8 +160,9 @@ author: - Matthew Davis (@matt-telstra) on behalf of Telstra Corporation Limited extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 + - amazon.aws.aws + - amazon.aws.ec2 + ''' EXAMPLES = ''' From 37d758ec03047e254aa462cba7e6c308c44a2682 Mon Sep 17 00:00:00 2001 From: Sebastien Rosset Date: Sun, 30 Jan 2022 04:12:43 -0800 Subject: [PATCH 09/31] improve doc (#901) Improve doc of ec2_launch_template module SUMMARY Add information about how the ec2_launch_template handles initial creation of the launch template, or creation of a new version of the launch template. ISSUE TYPE Docs Pull Request COMPONENT NAME ec2_launch_template ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell --- plugins/modules/ec2_launch_template.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/modules/ec2_launch_template.py b/plugins/modules/ec2_launch_template.py index e96049fa347..1051c1b7c66 100644 --- a/plugins/modules/ec2_launch_template.py +++ b/plugins/modules/ec2_launch_template.py @@ -31,6 +31,11 @@ template_name: description: - The template name. This must be unique in the region-account combination you are using. + - If no launch template exists with the specified name, a new launch template is created. 
+ - If a launch template with the specified name already exists and the configuration has not changed, + nothing happens. + - If a launch template with the specified name already exists and the configuration has changed, + a new version of the launch template is created. aliases: [name] type: str default_version: From cecc9e8087ce0bd1eacebdd19b4c45a17070eafa Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 31 Jan 2022 10:29:42 +0000 Subject: [PATCH 10/31] Fix cloudfront_distribution s3_origin_access_identity_enabled bug (#881) Fix cloudfront_distribution s3_origin_access_identity_enabled bug SUMMARY If s3_origin_access_identity_enabled is set to True but no s3_origin_config then a default origin config is applied however it also picks up s3_origin_access_identity_enabled as S3OriginAccessIdentityEnabled and passes it to the API request which is not a valid option to be passed and then fails validation. Fixes: #749 ISSUE TYPE Bugfix Pull Request COMPONENT NAME cloudfront_distribution ADDITIONAL INFORMATION The option mention is not valid for the API request: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/cloudfront.html#CloudFront.Client.create_distribution Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- changelogs/fragments/881-cloudfront-bug.yml | 2 ++ plugins/modules/cloudfront_distribution.py | 14 +++++++++----- 2 files changed, 11 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/881-cloudfront-bug.yml diff --git a/changelogs/fragments/881-cloudfront-bug.yml b/changelogs/fragments/881-cloudfront-bug.yml new file mode 100644 index 00000000000..106c9443723 --- /dev/null +++ b/changelogs/fragments/881-cloudfront-bug.yml @@ -0,0 +1,2 @@ +bugfixes: + - cloudfront_distribution - Dont pass ``s3_origin_access_identity_enabled`` to API request (https://github.com/ansible-collections/community.aws/pull/881). 
\ No newline at end of file diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py index 80ac6dcec4b..946b93e2041 100644 --- a/plugins/modules/cloudfront_distribution.py +++ b/plugins/modules/cloudfront_distribution.py @@ -1686,9 +1686,6 @@ def validate_origins(self, client, config, origins, default_origin_domain_name, self.module.fail_json_aws(e, msg="Error validating distribution origins") def validate_s3_origin_configuration(self, client, existing_config, origin): - if not origin['s3_origin_access_identity_enabled']: - return None - if origin.get('s3_origin_config', {}).get('origin_access_identity'): return origin['s3_origin_config']['origin_access_identity'] @@ -1719,13 +1716,20 @@ def validate_origin(self, client, existing_config, origin, default_origin_path): origin['custom_headers'] = ansible_list_to_cloudfront_list() if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower(): if origin.get("s3_origin_access_identity_enabled") is not None: - s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) + if origin['s3_origin_access_identity_enabled']: + s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) + else: + s3_origin_config = None + + del(origin["s3_origin_access_identity_enabled"]) + if s3_origin_config: oai = s3_origin_config else: oai = "" + origin["s3_origin_config"] = dict(origin_access_identity=oai) - del(origin["s3_origin_access_identity_enabled"]) + if 'custom_origin_config' in origin: self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive") else: From 3f7d4a06be6afc2a4895df2d8680f4432a6cd267 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 31 Jan 2022 11:29:46 +0100 Subject: [PATCH 11/31] Bump release to 4.0.0-dev0 (#897) Bump release to 4.0.0-dev0 SUMMARY See also: ansible-collections/amazon.aws#639 Having released amazon.aws 3.0.0 and branched stable-3, bump the release to 4.0.0-dev0 The stable-3 branch now exists, as such we can start introducing the breaking changes slated for 4.0.0 rather than trying to push them in in the last few days before a release. 
ISSUE TYPE Feature Pull Request COMPONENT NAME galaxy.yml ADDITIONAL INFORMATION Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- galaxy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/galaxy.yml b/galaxy.yml index dce30d1244c..3f1c17016e2 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -1,6 +1,6 @@ namespace: community name: aws -version: 3.0.1 +version: 4.0.0-dev0 readme: README.md authors: - Ansible (https://github.com/ansible) From 00037befadcca989b9bd63a3ee92ce8732e88133 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 31 Jan 2022 12:26:36 +0100 Subject: [PATCH 12/31] [Breaking Change] Final removal of original boto SDK (#898) [Breaking Change] Final removal of original boto SDK SUMMARY Remove old boto based inventory script Clean up requirements Clean up random comments in docs/comments ISSUE TYPE Feature Pull Request COMPONENT NAME scripts/inventory/ec2.py requirements.txt test-requirements.txt tests/integration/requirements.txt tests/unit/requirements.txt ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- changelogs/fragments/898-boto-removal.yaml | 6 + .../aws_direct_connect_virtual_interface.py | 2 +- plugins/modules/ec2_eip.py | 2 +- plugins/modules/ecs_service.py | 2 +- plugins/modules/ecs_taskdefinition.py | 2 +- plugins/modules/iam_saml_federation.py | 2 +- plugins/modules/iam_server_certificate.py | 2 - plugins/modules/rds_param_group.py | 2 +- plugins/modules/route53.py | 2 +- requirements.txt | 2 - scripts/inventory/__init__.py | 0 scripts/inventory/ec2.ini | 219 --- scripts/inventory/ec2.py | 1699 ----------------- test-requirements.txt | 1 - tests/integration/requirements.txt | 1 - .../targets/script_inventory_ec2/aliases | 3 - .../targets/script_inventory_ec2/ec2.sh | 5 - .../script_inventory_ec2/inventory_diff.py | 67 - .../targets/script_inventory_ec2/runme.sh | 151 -- tests/unit/requirements.txt | 1 - 20 files changed, 13 insertions(+), 2158 deletions(-) create mode 100644 changelogs/fragments/898-boto-removal.yaml delete mode 100644 scripts/inventory/__init__.py delete mode 100644 scripts/inventory/ec2.ini delete mode 100644 scripts/inventory/ec2.py delete mode 100644 tests/integration/targets/script_inventory_ec2/aliases delete mode 100644 tests/integration/targets/script_inventory_ec2/ec2.sh delete mode 100644 tests/integration/targets/script_inventory_ec2/inventory_diff.py delete mode 100755 tests/integration/targets/script_inventory_ec2/runme.sh diff --git a/changelogs/fragments/898-boto-removal.yaml b/changelogs/fragments/898-boto-removal.yaml new file mode 100644 index 00000000000..bab13f3dfbe --- /dev/null +++ b/changelogs/fragments/898-boto-removal.yaml @@ -0,0 +1,6 @@ +breaking_changes: +- script_inventory_ec2 - The ec2.py inventory script has been moved to a new repository. + The script can now be downloaded from https://github.com/ansible-community/contrib-scripts/blob/main/inventory/ec2.py and has been removed from this collection. + We recommend migrating from the script to the amazon.aws.ec2 inventory plugin. (https://github.com/ansible-collections/community.aws/pull/898) +- community.aws collection - The ``community.aws`` collection has now dropped support for and any requirements upon the original ``boto`` AWS SDK, and now uses the ``boto3``/``botocore`` AWS SDK + (https://github.com/ansible-collections/community.aws/pull/898). 
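As a companion to the breaking_changes entry above, which recommends the amazon.aws inventory plugin as the replacement for the removed ec2.py script, here is a minimal sketch of an equivalent plugin configuration. It is illustrative only: the region, tag filter, grouping and hostname choices stand in for whatever a given ec2.ini carried and are not part of this patch.

# aws_ec2.yml -- the file name must end in aws_ec2.yml or aws_ec2.yaml,
# e.g. `ansible-inventory -i aws_ec2.yml --graph`
plugin: amazon.aws.aws_ec2
regions:
  - us-east-1
# Roughly the old `instance_filters = tag:env=staging` plus the script's
# default of only returning running instances.
filters:
  instance-state-name: running
  tag:env: staging
# Recreates the script's tag_* groups (group_by_tag_keys).
keyed_groups:
  - prefix: tag
    key: tags
# Prefer the Name tag as the inventory hostname, fall back to the public DNS name.
hostnames:
  - tag:Name
  - dns-name
compose:
  ansible_host: public_dns_name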
diff --git a/plugins/modules/aws_direct_connect_virtual_interface.py b/plugins/modules/aws_direct_connect_virtual_interface.py index d2d199c5527..f0c1b7f7800 100644 --- a/plugins/modules/aws_direct_connect_virtual_interface.py +++ b/plugins/modules/aws_direct_connect_virtual_interface.py @@ -404,7 +404,7 @@ def create_vi(client, public, associated_id, creation_params): :param public: a boolean :param associated_id: a link aggregation group ID or connection ID to associate with the virtual interface. - :param creation_params: a dict of parameters to use in the boto call + :param creation_params: a dict of parameters to use in the AWS SDK call :return The ID of the created virtual interface ''' err_msg = "Failed to create virtual interface" diff --git a/plugins/modules/ec2_eip.py b/plugins/modules/ec2_eip.py index e38e941661f..ca883e5f715 100644 --- a/plugins/modules/ec2_eip.py +++ b/plugins/modules/ec2_eip.py @@ -494,7 +494,7 @@ def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True) def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool): # type: (EC2Connection, str, bool, str) -> Address - """ Overrides boto's allocate_address function to support BYOIP """ + """ Overrides botocore's allocate_address function to support BYOIP """ params = {} if domain is not None: diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py index d43253af386..8e7adbcacc2 100644 --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -752,7 +752,7 @@ def main(): loadBalancer['containerPort'] = int(loadBalancer['containerPort']) if update: - # check various parameters and boto versions and give a helpful error in boto is not new enough for feature + # check various parameters and AWS SDK versions and give a helpful error if the SDK is not new enough for feature if module.params['scheduling_strategy']: if (existing['schedulingStrategy']) != module.params['scheduling_strategy']: diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py index ab3a47d176e..f99db8b9659 100644 --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -685,7 +685,7 @@ def register_task(self, family, task_role_arn, execution_role_arn, network_mode, volumes, launch_type, cpu, memory, placement_constraints): validated_containers = [] - # Ensures the number parameters are int as required by boto + # Ensures the number parameters are int as required by the AWS SDK for container in container_definitions: for param in ('memory', 'cpu', 'memoryReservation', 'startTimeout', 'stopTimeout'): if param in container: diff --git a/plugins/modules/iam_saml_federation.py b/plugins/modules/iam_saml_federation.py index 4b41f443134..70bd4461d10 100644 --- a/plugins/modules/iam_saml_federation.py +++ b/plugins/modules/iam_saml_federation.py @@ -120,7 +120,7 @@ def __init__(self, module): try: self.conn = module.client('iam') except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Unknown boto error") + self.module.fail_json_aws(e, msg="Unknown AWS SDK error") # use retry decorator for boto3 calls @AWSRetry.jittered_backoff(retries=3, delay=5) diff --git a/plugins/modules/iam_server_certificate.py b/plugins/modules/iam_server_certificate.py index b6cad710fb3..142d391ac06 100644 --- a/plugins/modules/iam_server_certificate.py +++ b/plugins/modules/iam_server_certificate.py @@ -86,8 +86,6 @@ extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 -requirements: -- boto 
>= 2.49.0 ''' EXAMPLES = ''' diff --git a/plugins/modules/rds_param_group.py b/plugins/modules/rds_param_group.py index 76e6138b466..7d5d216d092 100644 --- a/plugins/modules/rds_param_group.py +++ b/plugins/modules/rds_param_group.py @@ -159,7 +159,7 @@ def convert_parameter(param, value): converted_value = int(value[:-1]) * INT_MODIFIERS[modifier] except ValueError: # may be based on a variable (ie. {foo*3/4}) so - # just pass it on through to boto + # just pass it on through to the AWS SDK pass elif isinstance(value, bool): converted_value = 1 if value else 0 diff --git a/plugins/modules/route53.py b/plugins/modules/route53.py index 4275d65b684..4ddacdca09e 100644 --- a/plugins/modules/route53.py +++ b/plugins/modules/route53.py @@ -413,7 +413,7 @@ def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id): if private_zone == want_private and zone['Name'] == zone_name: if want_vpc_id: - # NOTE: These details aren't available in other boto methods, hence the necessary + # NOTE: These details aren't available in other boto3 methods, hence the necessary # extra API call hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone_id) if want_vpc_id in [v['VPCId'] for v in hosted_zone['VPCs']]: diff --git a/requirements.txt b/requirements.txt index 1ff3ca2feb4..1a52353f680 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,5 +4,3 @@ # - tests/integration/targets/setup_botocore_pip botocore>=1.19.0 boto3>=1.16.0 -# Final released version -boto>=2.49.0 diff --git a/scripts/inventory/__init__.py b/scripts/inventory/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/scripts/inventory/ec2.ini b/scripts/inventory/ec2.ini deleted file mode 100644 index d5e4742032e..00000000000 --- a/scripts/inventory/ec2.ini +++ /dev/null @@ -1,219 +0,0 @@ -# Ansible EC2 external inventory script settings -# - -[ec2] - -# to talk to a private eucalyptus instance uncomment these lines -# and edit edit eucalyptus_host to be the host name of your cloud controller -#eucalyptus = True -#eucalyptus_host = clc.cloud.domain.org - -# AWS regions to make calls to. Set this to 'all' to make request to all regions -# in AWS and merge the results together. Alternatively, set this to a comma -# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not -# provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or -# AWS_DEFAULT_REGION environment variable will be read to determine the region. -regions = all -regions_exclude = us-gov-west-1, cn-north-1 - -# When generating inventory, Ansible needs to know how to address a server. -# Each EC2 instance has a lot of variables associated with it. Here is the list: -# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance -# Below are 2 variables that are used as the address of a server: -# - destination_variable -# - vpc_destination_variable - -# This is the normal destination variable to use. If you are running Ansible -# from outside EC2, then 'public_dns_name' makes the most sense. If you are -# running Ansible from within EC2, then perhaps you want to use the internal -# address, and should set this to 'private_dns_name'. The key of an EC2 tag -# may optionally be used; however the boto instance variables hold precedence -# in the event of a collision. -destination_variable = public_dns_name - -# This allows you to override the inventory_name with an ec2 variable, instead -# of using the destination_variable above. 
Addressing (aka ansible_ssh_host) -# will still use destination_variable. Tags should be written as 'tag_TAGNAME'. -#hostname_variable = tag_Name - -# For server inside a VPC, using DNS names may not make sense. When an instance -# has 'subnet_id' set, this variable is used. If the subnet is public, setting -# this to 'ip_address' will return the public IP address. For instances in a -# private subnet, this should be set to 'private_ip_address', and Ansible must -# be run from within EC2. The key of an EC2 tag may optionally be used; however -# the boto instance variables hold precedence in the event of a collision. -# WARNING: - instances that are in the private vpc, _without_ public ip address -# will not be listed in the inventory until You set: -# vpc_destination_variable = private_ip_address -vpc_destination_variable = ip_address - -# The following two settings allow flexible ansible host naming based on a -# python format string and a comma-separated list of ec2 tags. Note that: -# -# 1) If the tags referenced are not present for some instances, empty strings -# will be substituted in the format string. -# 2) This overrides both destination_variable and vpc_destination_variable. -# -#destination_format = {0}.{1}.example.com -#destination_format_tags = Name,environment - -# To tag instances on EC2 with the resource records that point to them from -# Route53, set 'route53' to True. -route53 = False - -# To use Route53 records as the inventory hostnames, uncomment and set -# to equal the domain name you wish to use. You must also have 'route53' (above) -# set to True. -# route53_hostnames = .example.com - -# To exclude RDS instances from the inventory, uncomment and set to False. -#rds = False - -# To exclude ElastiCache instances from the inventory, uncomment and set to False. -#elasticache = False - -# Additionally, you can specify the list of zones to exclude looking up in -# 'route53_excluded_zones' as a comma-separated list. -# route53_excluded_zones = samplezone1.com, samplezone2.com - -# By default, only EC2 instances in the 'running' state are returned. Set -# 'all_instances' to True to return all instances regardless of state. -all_instances = False - -# By default, only EC2 instances in the 'running' state are returned. Specify -# EC2 instance states to return as a comma-separated list. This -# option is overridden when 'all_instances' is True. -# instance_states = pending, running, shutting-down, terminated, stopping, stopped - -# By default, only RDS instances in the 'available' state are returned. Set -# 'all_rds_instances' to True return all RDS instances regardless of state. -all_rds_instances = False - -# Include RDS cluster information (Aurora etc.) -include_rds_clusters = False - -# By default, only ElastiCache clusters and nodes in the 'available' state -# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' -# to True return all ElastiCache clusters and nodes, regardless of state. -# -# Note that all_elasticache_nodes only applies to listed clusters. That means -# if you set all_elastic_clusters to false, no node will be return from -# unavailable clusters, regardless of the state and to what you set for -# all_elasticache_nodes. -all_elasticache_replication_groups = False -all_elasticache_clusters = False -all_elasticache_nodes = False - -# API calls to EC2 are slow. For this reason, we cache the results of an API -# call. Set this to the path you want cache files to be written to. 
Two files -# will be written to this directory: -# - ansible-ec2.cache -# - ansible-ec2.index -cache_path = ~/.ansible/tmp - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -# To disable the cache, set this value to 0 -cache_max_age = 300 - -# Organize groups into a nested/hierarchy instead of a flat namespace. -nested_groups = False - -# Replace - tags when creating groups to avoid issues with ansible -replace_dash_in_groups = True - -# If set to true, any tag of the form "a,b,c" is expanded into a list -# and the results are used to create additional tag_* inventory groups. -expand_csv_tags = False - -# The EC2 inventory output can become very large. To manage its size, -# configure which groups should be created. -group_by_instance_id = True -group_by_region = True -group_by_availability_zone = True -group_by_aws_account = False -group_by_ami_id = True -group_by_instance_type = True -group_by_instance_state = False -group_by_platform = True -group_by_key_pair = True -group_by_vpc_id = True -group_by_security_group = True -group_by_tag_keys = True -group_by_tag_none = True -group_by_route53_names = True -group_by_rds_engine = True -group_by_rds_parameter_group = True -group_by_elasticache_engine = True -group_by_elasticache_cluster = True -group_by_elasticache_parameter_group = True -group_by_elasticache_replication_group = True - -# If you only want to include hosts that match a certain regular expression -# pattern_include = staging-* - -# If you want to exclude any hosts that match a certain regular expression -# pattern_exclude = staging-* - -# Instance filters can be used to control which instances are retrieved for -# inventory. For the full list of possible filters, please read the EC2 API -# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters -# Filters are key/value pairs separated by '=', to list multiple filters use -# a list separated by commas. To "AND" criteria together, use "&". Note that -# the "AND" is not useful along with stack_filters and so such usage is not allowed. -# See examples below. - -# If you want to apply multiple filters simultaneously, set stack_filters to -# True. Default behaviour is to combine the results of all filters. Stacking -# allows the use of multiple conditions to filter down, for example by -# environment and type of host. -stack_filters = False - -# Retrieve only instances with (key=value) env=staging tag -# instance_filters = tag:env=staging - -# Retrieve only instances with role=webservers OR role=dbservers tag -# instance_filters = tag:role=webservers,tag:role=dbservers - -# Retrieve only t1.micro instances OR instances with tag env=staging -# instance_filters = instance-type=t1.micro,tag:env=staging - -# You can use wildcards in filter values also. Below will list instances which -# tag Name value matches webservers1* -# (ex. webservers15, webservers1a, webservers123 etc) -# instance_filters = tag:Name=webservers1* - -# Retrieve only instances of type t1.micro that also have tag env=stage -# instance_filters = instance-type=t1.micro&tag:env=stage - -# Retrieve instances of type t1.micro AND tag env=stage, as well as any instance -# that are of type m3.large, regardless of env tag -# instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large - -# An IAM role can be assumed, so all requests are run as that role. 
-# This can be useful for connecting across different accounts, or to limit user -# access -# iam_role = role-arn - -# A boto configuration profile may be used to separate out credentials -# see https://boto.readthedocs.io/en/latest/boto_config_tut.html -# boto_profile = some-boto-profile-name - - -[credentials] - -# The AWS credentials can optionally be specified here. Credentials specified -# here are ignored if the environment variable AWS_ACCESS_KEY_ID or -# AWS_PROFILE is set, or if the boto_profile property above is set. -# -# Supplying AWS credentials here is not recommended, as it introduces -# non-trivial security concerns. When going down this route, please make sure -# to set access permissions for this file correctly, e.g. handle it the same -# way as you would a private SSH key. -# -# Unlike the boto and AWS configure files, this section does not support -# profiles. -# -# aws_access_key_id = AXXXXXXXXXXXXXX -# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX -# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX diff --git a/scripts/inventory/ec2.py b/scripts/inventory/ec2.py deleted file mode 100644 index d903de14ac8..00000000000 --- a/scripts/inventory/ec2.py +++ /dev/null @@ -1,1699 +0,0 @@ -#!/usr/bin/env python -# Copyright: (c) 2012, Peter Sankauskas - -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -EC2 external inventory script -================================= - -Generates inventory that Ansible can understand by making API request to -AWS EC2 using the Boto library. - -NOTE: This script assumes Ansible is being executed where the environment -variables needed for Boto have already been set: - export AWS_ACCESS_KEY_ID='AK123' - export AWS_SECRET_ACCESS_KEY='abc123' - -Optional region environment variable if region is 'auto' - -This script also assumes that there is an ec2.ini file alongside it. To specify a -different path to ec2.ini, define the EC2_INI_PATH environment variable: - - export EC2_INI_PATH=/path/to/my_ec2.ini - -If you're using eucalyptus you need to set the above variables and -you need to define: - - export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus - -If you're using boto profiles (requires boto>=2.24.0) you can choose a profile -using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using -the AWS_PROFILE variable: - - AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml - -For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html - -You can filter for specific EC2 instances by creating an environment variable -named EC2_INSTANCE_FILTERS, which has the same format as the instance_filters -entry documented in ec2.ini. 
For example, to find all hosts whose name begins -with 'webserver', one might use: - - export EC2_INSTANCE_FILTERS='tag:Name=webserver*' - -When run against a specific host, this script returns the following variables: - - ec2_ami_launch_index - - ec2_architecture - - ec2_association - - ec2_attachTime - - ec2_attachment - - ec2_attachmentId - - ec2_block_devices - - ec2_client_token - - ec2_deleteOnTermination - - ec2_description - - ec2_deviceIndex - - ec2_dns_name - - ec2_eventsSet - - ec2_group_name - - ec2_hypervisor - - ec2_id - - ec2_image_id - - ec2_instanceState - - ec2_instance_type - - ec2_ipOwnerId - - ec2_ip_address - - ec2_item - - ec2_kernel - - ec2_key_name - - ec2_launch_time - - ec2_monitored - - ec2_monitoring - - ec2_networkInterfaceId - - ec2_ownerId - - ec2_persistent - - ec2_placement - - ec2_platform - - ec2_previous_state - - ec2_private_dns_name - - ec2_private_ip_address - - ec2_publicIp - - ec2_public_dns_name - - ec2_ramdisk - - ec2_reason - - ec2_region - - ec2_requester_id - - ec2_root_device_name - - ec2_root_device_type - - ec2_security_group_ids - - ec2_security_group_names - - ec2_shutdown_state - - ec2_sourceDestCheck - - ec2_spot_instance_request_id - - ec2_state - - ec2_state_code - - ec2_state_reason - - ec2_status - - ec2_subnet_id - - ec2_tenancy - - ec2_virtualization_type - - ec2_vpc_id - -These variables are pulled out of a boto.ec2.instance object. There is a lack of -consistency with variable spellings (camelCase and underscores) since this -just loops through all variables the object exposes. It is preferred to use the -ones with underscores when multiple exist. - -In addition, if an instance has AWS tags associated with it, each tag is a new -variable named: - - ec2_tag_[Key] = [Value] - -Security groups are comma-separated in 'ec2_security_group_ids' and -'ec2_security_group_names'. - -When destination_format and destination_format_tags are specified -the destination_format can be built from the instance tags and attributes. -The behavior will first check the user defined tags, then proceed to -check instance attributes, and finally if neither are found 'nil' will -be used instead. - -'my_instance': { - 'region': 'us-east-1', # attribute - 'availability_zone': 'us-east-1a', # attribute - 'private_dns_name': '172.31.0.1', # attribute - 'ec2_tag_deployment': 'blue', # tag - 'ec2_tag_clusterid': 'ansible', # tag - 'ec2_tag_Name': 'webserver', # tag - ... -} - -Inside of the ec2.ini file the following settings are specified: -... -destination_format: {0}-{1}-{2}-{3} -destination_format_tags: Name,clusterid,deployment,private_dns_name -... 
- -These settings would produce a destination_format as the following: -'webserver-ansible-blue-172.31.0.1' -''' - -import argparse -import json -import os -import re -import sys -from collections import defaultdict -from copy import deepcopy -from datetime import date, datetime -from time import time - -import boto -from boto import ec2 -from boto import elasticache -from boto import rds -from boto import route53 -from boto import sts - -HAS_BOTO3 = False -try: - # Used so that we can cleanly fail, some of our (optional) dependencies need this - import boto3 # pylint: disable=unused-import - HAS_BOTO3 = True -except ImportError: - pass - -from ansible.module_utils import six -from ansible.module_utils.six.moves import configparser -from ansible_collections.amazon.aws.plugins.module_utils import ec2 as ec2_utils - - -DEFAULTS = { - 'all_elasticache_clusters': 'False', - 'all_elasticache_nodes': 'False', - 'all_elasticache_replication_groups': 'False', - 'all_instances': 'False', - 'all_rds_instances': 'False', - 'aws_access_key_id': '', - 'aws_secret_access_key': '', - 'aws_security_token': '', - 'boto_profile': '', - 'cache_max_age': '300', - 'cache_path': '~/.ansible/tmp', - 'destination_variable': 'public_dns_name', - 'elasticache': 'True', - 'eucalyptus': 'False', - 'eucalyptus_host': '', - 'expand_csv_tags': 'False', - 'group_by_ami_id': 'True', - 'group_by_availability_zone': 'True', - 'group_by_aws_account': 'False', - 'group_by_elasticache_cluster': 'True', - 'group_by_elasticache_engine': 'True', - 'group_by_elasticache_parameter_group': 'True', - 'group_by_elasticache_replication_group': 'True', - 'group_by_instance_id': 'True', - 'group_by_instance_state': 'False', - 'group_by_instance_type': 'True', - 'group_by_key_pair': 'True', - 'group_by_platform': 'True', - 'group_by_rds_engine': 'True', - 'group_by_rds_parameter_group': 'True', - 'group_by_region': 'True', - 'group_by_route53_names': 'True', - 'group_by_security_group': 'True', - 'group_by_tag_keys': 'True', - 'group_by_tag_none': 'True', - 'group_by_vpc_id': 'True', - 'hostname_variable': '', - 'iam_role': '', - 'include_rds_clusters': 'False', - 'nested_groups': 'False', - 'pattern_exclude': '', - 'pattern_include': '', - 'rds': 'False', - 'regions': 'all', - 'regions_exclude': 'us-gov-west-1, cn-north-1', - 'replace_dash_in_groups': 'True', - 'route53': 'False', - 'route53_excluded_zones': '', - 'route53_hostnames': '', - 'stack_filters': 'False', - 'vpc_destination_variable': 'ip_address' -} - - -class Ec2Inventory(object): - - def _empty_inventory(self): - return {"_meta": {"hostvars": {}}} - - def _json_serial(self, obj): - """JSON serializer for objects not serializable by default json code""" - - if isinstance(obj, (datetime, date)): - return obj.isoformat() - raise TypeError("Type %s not serializable" % type(obj)) - - def __init__(self): - ''' Main execution path ''' - - # Inventory grouped by instance IDs, tags, security groups, regions, - # and availability zones - self.inventory = self._empty_inventory() - - self.aws_account_id = None - - # Index of hostname (address) to instance ID - self.index = {} - - # Boto profile to use (if any) - self.boto_profile = None - - # AWS credentials. 
- self.credentials = {} - - # Read settings and parse CLI arguments - self.parse_cli_args() - self.read_settings() - - # Make sure that profile_name is not passed at all if not set - # as pre 2.24 boto will fall over otherwise - if self.boto_profile: - if not hasattr(boto.ec2.EC2Connection, 'profile_name'): - self.fail_with_error("boto version must be >= 2.24 to use profile") - - # Cache - if self.args.refresh_cache: - self.do_api_calls_update_cache() - elif not self.is_cache_valid(): - self.do_api_calls_update_cache() - - # Data to print - if self.args.host: - data_to_print = self.get_host_info() - - elif self.args.list: - # Display list of instances for inventory - if self.inventory == self._empty_inventory(): - data_to_print = self.get_inventory_from_cache() - else: - data_to_print = self.json_format_dict(self.inventory, True) - - print(data_to_print) - - def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' - - if os.path.isfile(self.cache_path_cache): - mod_time = os.path.getmtime(self.cache_path_cache) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - if os.path.isfile(self.cache_path_index): - return True - - return False - - def read_settings(self): - ''' Reads the settings from the ec2.ini file ''' - - scriptbasename = __file__ - scriptbasename = os.path.basename(scriptbasename) - scriptbasename = scriptbasename.replace('.py', '') - - defaults = { - 'ec2': { - 'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'), - 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename) - } - } - - if six.PY3: - config = configparser.ConfigParser(DEFAULTS) - else: - config = configparser.SafeConfigParser(DEFAULTS) - ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path']) - ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path)) - - if not os.path.isfile(ec2_ini_path): - ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback']) - - if os.path.isfile(ec2_ini_path): - config.read(ec2_ini_path) - - # Add empty sections if they don't exist - try: - config.add_section('ec2') - except configparser.DuplicateSectionError: - pass - - try: - config.add_section('credentials') - except configparser.DuplicateSectionError: - pass - - # is eucalyptus? 
- self.eucalyptus = config.getboolean('ec2', 'eucalyptus') - self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') - - # Regions - self.regions = [] - config_regions = config.get('ec2', 'regions') - if (config_regions == 'all'): - if self.eucalyptus_host: - self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials) - else: - config_regions_exclude = config.get('ec2', 'regions_exclude') - - for region_info in ec2.regions(): - if region_info.name not in config_regions_exclude: - self.regions.append(region_info.name) - else: - self.regions = config_regions.split(",") - if 'auto' in self.regions: - env_region = os.environ.get('AWS_REGION') - if env_region is None: - env_region = os.environ.get('AWS_DEFAULT_REGION') - self.regions = [env_region] - - # Destination addresses - self.destination_variable = config.get('ec2', 'destination_variable') - self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') - self.hostname_variable = config.get('ec2', 'hostname_variable') - - if config.has_option('ec2', 'destination_format') and \ - config.has_option('ec2', 'destination_format_tags'): - self.destination_format = config.get('ec2', 'destination_format') - self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') - else: - self.destination_format = None - self.destination_format_tags = None - - # Route53 - self.route53_enabled = config.getboolean('ec2', 'route53') - self.route53_hostnames = config.get('ec2', 'route53_hostnames') - - self.route53_excluded_zones = [] - self.route53_excluded_zones = [a for a in config.get('ec2', 'route53_excluded_zones').split(',') if a] - - # Include RDS instances? - self.rds_enabled = config.getboolean('ec2', 'rds') - - # Include RDS cluster instances? - self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters') - - # Include ElastiCache instances? - self.elasticache_enabled = config.getboolean('ec2', 'elasticache') - - # Return all EC2 instances? - self.all_instances = config.getboolean('ec2', 'all_instances') - - # Instance states to be gathered in inventory. Default is 'running'. - # Setting 'all_instances' to 'yes' overrides this option. - ec2_valid_instance_states = [ - 'pending', - 'running', - 'shutting-down', - 'terminated', - 'stopping', - 'stopped' - ] - self.ec2_instance_states = [] - if self.all_instances: - self.ec2_instance_states = ec2_valid_instance_states - elif config.has_option('ec2', 'instance_states'): - for instance_state in config.get('ec2', 'instance_states').split(','): - instance_state = instance_state.strip() - if instance_state not in ec2_valid_instance_states: - continue - self.ec2_instance_states.append(instance_state) - else: - self.ec2_instance_states = ['running'] - - # Return all RDS instances? (if RDS is enabled) - self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') - - # Return all ElastiCache replication groups? (if ElastiCache is enabled) - self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') - - # Return all ElastiCache clusters? (if ElastiCache is enabled) - self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') - - # Return all ElastiCache nodes? 
(if ElastiCache is enabled) - self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') - - # boto configuration profile (prefer CLI argument then environment variables then config file) - self.boto_profile = self.args.boto_profile or \ - os.environ.get('AWS_PROFILE') or \ - config.get('ec2', 'boto_profile') - - # AWS credentials (prefer environment variables) - if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or - os.environ.get('AWS_PROFILE')): - - aws_access_key_id = config.get('credentials', 'aws_access_key_id') - aws_secret_access_key = config.get('credentials', 'aws_secret_access_key') - aws_security_token = config.get('credentials', 'aws_security_token') - - if aws_access_key_id: - self.credentials = { - 'aws_access_key_id': aws_access_key_id, - 'aws_secret_access_key': aws_secret_access_key - } - if aws_security_token: - self.credentials['security_token'] = aws_security_token - - # Cache related - cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) - if self.boto_profile: - cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - cache_name = 'ansible-ec2' - cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id')) - if cache_id: - cache_name = '%s-%s' % (cache_name, cache_id) - cache_name += '-' + str(abs(hash(__file__)))[1:7] - self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name) - self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name) - self.cache_max_age = config.getint('ec2', 'cache_max_age') - - self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') - - # Configure nested groups instead of flat namespace. - self.nested_groups = config.getboolean('ec2', 'nested_groups') - - # Replace dash or not in group names - self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') - - # IAM role to assume for connection - self.iam_role = config.get('ec2', 'iam_role') - - # Configure which groups should be created. - - group_by_options = [a for a in DEFAULTS if a.startswith('group_by')] - for option in group_by_options: - setattr(self, option, config.getboolean('ec2', option)) - - # Do we need to just include hosts that match a pattern? - self.pattern_include = config.get('ec2', 'pattern_include') - if self.pattern_include: - self.pattern_include = re.compile(self.pattern_include) - - # Do we need to exclude hosts that match a pattern? - self.pattern_exclude = config.get('ec2', 'pattern_exclude') - if self.pattern_exclude: - self.pattern_exclude = re.compile(self.pattern_exclude) - - # Do we want to stack multiple filters? - self.stack_filters = config.getboolean('ec2', 'stack_filters') - - # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
- self.ec2_instance_filters = [] - - if config.has_option('ec2', 'instance_filters') or 'EC2_INSTANCE_FILTERS' in os.environ: - filters = os.getenv('EC2_INSTANCE_FILTERS', config.get('ec2', 'instance_filters') if config.has_option('ec2', 'instance_filters') else '') - - if self.stack_filters and '&' in filters: - self.fail_with_error("AND filters along with stack_filter enabled is not supported.\n") - - filter_sets = [f for f in filters.split(',') if f] - - for filter_set in filter_sets: - filters = {} - filter_set = filter_set.strip() - for instance_filter in filter_set.split("&"): - instance_filter = instance_filter.strip() - if not instance_filter or '=' not in instance_filter: - continue - filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] - if not filter_key: - continue - filters[filter_key] = filter_value - self.ec2_instance_filters.append(filters.copy()) - - def parse_cli_args(self): - ''' Command line argument processing ''' - - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') - parser.add_argument('--list', action='store_true', default=True, - help='List instances (default: True)') - parser.add_argument('--host', action='store', - help='Get all the variables about a specific instance') - parser.add_argument('--refresh-cache', action='store_true', default=False, - help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') - parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile', - help='Use boto profile for connections to EC2') - self.args = parser.parse_args() - - def do_api_calls_update_cache(self): - ''' Do API calls to each region, and save data in cache files ''' - - if self.route53_enabled: - self.get_route53_records() - - for region in self.regions: - self.get_instances_by_region(region) - if self.rds_enabled: - self.get_rds_instances_by_region(region) - if self.elasticache_enabled: - self.get_elasticache_clusters_by_region(region) - self.get_elasticache_replication_groups_by_region(region) - if self.include_rds_clusters: - self.include_rds_clusters_by_region(region) - - self.write_to_cache(self.inventory, self.cache_path_cache) - self.write_to_cache(self.index, self.cache_path_index) - - def connect(self, region): - ''' create connection to api server''' - if self.eucalyptus: - conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials) - conn.APIVersion = '2010-08-31' - else: - conn = self.connect_to_aws(ec2, region) - return conn - - def boto_fix_security_token_in_profile(self, connect_args): - ''' monkey patch for boto issue boto/boto#2100 ''' - profile = 'profile ' + self.boto_profile - if boto.config.has_option(profile, 'aws_security_token'): - connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') - return connect_args - - def connect_to_aws(self, module, region): - connect_args = deepcopy(self.credentials) - - # only pass the profile name if it's set (as it is not supported by older boto versions) - if self.boto_profile: - connect_args['profile_name'] = self.boto_profile - self.boto_fix_security_token_in_profile(connect_args) - elif os.environ.get('AWS_SESSION_TOKEN'): - connect_args['security_token'] = os.environ.get('AWS_SESSION_TOKEN') - - if self.iam_role: - sts_conn = sts.connect_to_region(region, **connect_args) - role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory') - connect_args['aws_access_key_id'] = role.credentials.access_key - 
connect_args['aws_secret_access_key'] = role.credentials.secret_key - connect_args['security_token'] = role.credentials.session_token - - conn = module.connect_to_region(region, **connect_args) - # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported - if conn is None: - self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region) - return conn - - def get_instances_by_region(self, region): - ''' Makes an AWS EC2 API call to the list of instances in a particular - region ''' - - try: - conn = self.connect(region) - reservations = [] - if self.ec2_instance_filters: - if self.stack_filters: - filters_dict = {} - for filters in self.ec2_instance_filters: - filters_dict.update(filters) - reservations.extend(conn.get_all_instances(filters=filters_dict)) - else: - for filters in self.ec2_instance_filters: - reservations.extend(conn.get_all_instances(filters=filters)) - else: - reservations = conn.get_all_instances() - - # Pull the tags back in a second step - # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not - # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags` - instance_ids = [] - for reservation in reservations: - instance_ids.extend([instance.id for instance in reservation.instances]) - - max_filter_value = 199 - tags = [] - for i in range(0, len(instance_ids), max_filter_value): - tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]})) - - tags_by_instance_id = defaultdict(dict) - for tag in tags: - tags_by_instance_id[tag.res_id][tag.name] = tag.value - - if (not self.aws_account_id) and reservations: - self.aws_account_id = reservations[0].owner_id - - for reservation in reservations: - for instance in reservation.instances: - instance.tags = tags_by_instance_id[instance.id] - self.add_instance(instance, region) - - except boto.exception.BotoServerError as e: - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - else: - backend = 'Eucalyptus' if self.eucalyptus else 'AWS' - error = "Error connecting to %s backend.\n%s" % (backend, e.message) - self.fail_with_error(error, 'getting EC2 instances') - - def tags_match_filters(self, tags): - ''' return True if given tags match configured filters ''' - if not self.ec2_instance_filters: - return True - - for filters in self.ec2_instance_filters: - for filter_name, filter_value in filters.items(): - if filter_name[:4] != 'tag:': - continue - filter_name = filter_name[4:] - if filter_name not in tags: - if self.stack_filters: - return False - continue - if isinstance(filter_value, list): - if self.stack_filters and tags[filter_name] not in filter_value: - return False - if not self.stack_filters and tags[filter_name] in filter_value: - return True - if isinstance(filter_value, six.string_types): - if self.stack_filters and tags[filter_name] != filter_value: - return False - if not self.stack_filters and tags[filter_name] == filter_value: - return True - - return self.stack_filters - - def get_rds_instances_by_region(self, region): - ''' Makes an AWS API call to the list of RDS instances in a particular - region ''' - - if not HAS_BOTO3: - self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again", - "getting RDS instances") - - client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) 
- db_instances = client.describe_db_instances() - - try: - conn = self.connect_to_aws(rds, region) - if conn: - marker = None - while True: - instances = conn.get_all_dbinstances(marker=marker) - marker = instances.marker - for index, instance in enumerate(instances): - # Add tags to instances. - instance.arn = db_instances['DBInstances'][index]['DBInstanceArn'] - tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList'] - instance.tags = {} - for tag in tags: - instance.tags[tag['Key']] = tag['Value'] - if self.tags_match_filters(instance.tags): - self.add_rds_instance(instance, region) - if not marker: - break - except boto.exception.BotoServerError as e: - error = e.reason - - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - elif e.error_code == "OptInRequired": - error = "RDS hasn't been enabled for this account yet. " \ - "You must either log in to the RDS service through the AWS console to enable it, " \ - "or set 'rds = False' in ec2.ini" - elif not e.reason == "Forbidden": - error = "Looks like AWS RDS is down:\n%s" % e.message - self.fail_with_error(error, 'getting RDS instances') - - def include_rds_clusters_by_region(self, region): - if not HAS_BOTO3: - self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again", - "getting RDS clusters") - - client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) - - marker, clusters = '', [] - while marker is not None: - resp = client.describe_db_clusters(Marker=marker) - clusters.extend(resp["DBClusters"]) - marker = resp.get('Marker', None) - - account_id = boto.connect_iam().get_user().arn.split(':')[4] - c_dict = {} - for c in clusters: - if not self.ec2_instance_filters: - matches_filter = True - else: - matches_filter = False - - try: - # arn:aws:rds:::: - tags = client.list_tags_for_resource( - ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier']) - c['Tags'] = tags['TagList'] - - if self.ec2_instance_filters: - for filters in self.ec2_instance_filters: - for filter_key, filter_values in filters.items(): - # get AWS tag key e.g. tag:env will be 'env' - tag_name = filter_key.split(":", 1)[1] - # Filter values is a list (if you put multiple values for the same tag name) - matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags']) - - if matches_filter: - # it matches a filter, so stop looking for further matches - break - - if matches_filter: - break - - except Exception as e: - if e.message.find('DBInstanceNotFound') >= 0: - # AWS RDS bug (2016-01-06) means deletion does not fully complete and leave an 'empty' cluster. - # Ignore errors when trying to find tags for these - pass - - # ignore empty clusters caused by AWS bug - if len(c['DBClusterMembers']) == 0: - continue - elif matches_filter: - c_dict[c['DBClusterIdentifier']] = c - - self.inventory['db_clusters'] = c_dict - - def get_elasticache_clusters_by_region(self, region): - ''' Makes an AWS API call to the list of ElastiCache clusters (with - nodes' info) in a particular region.''' - - # ElastiCache boto module doesn't provide a get_all_instances method, - # that's why we need to call describe directly (it would be called by - # the shorthand method anyway...) 
- clusters = [] - try: - conn = self.connect_to_aws(elasticache, region) - if conn: - # show_cache_node_info = True - # because we also want nodes' information - _marker = 1 - while _marker: - if _marker == 1: - _marker = None - response = conn.describe_cache_clusters(None, None, _marker, True) - _marker = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['Marker'] - try: - # Boto also doesn't provide wrapper classes to CacheClusters or - # CacheNodes. Because of that we can't make use of the get_list - # method in the AWSQueryConnection. Let's do the work manually - clusters = clusters + response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] - except KeyError as e: - error = "ElastiCache query to AWS failed (unexpected format)." - self.fail_with_error(error, 'getting ElastiCache clusters') - except boto.exception.BotoServerError as e: - error = e.reason - - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - elif e.error_code == "OptInRequired": - error = "ElastiCache hasn't been enabled for this account yet. " \ - "You must either log in to the ElastiCache service through the AWS console to enable it, " \ - "or set 'elasticache = False' in ec2.ini" - elif not e.reason == "Forbidden": - error = "Looks like AWS ElastiCache is down:\n%s" % e.message - self.fail_with_error(error, 'getting ElastiCache clusters') - - for cluster in clusters: - self.add_elasticache_cluster(cluster, region) - - def get_elasticache_replication_groups_by_region(self, region): - ''' Makes an AWS API call to the list of ElastiCache replication groups - in a particular region.''' - - # ElastiCache boto module doesn't provide a get_all_instances method, - # that's why we need to call describe directly (it would be called by - # the shorthand method anyway...) - try: - conn = self.connect_to_aws(elasticache, region) - if conn: - response = conn.describe_replication_groups() - - except boto.exception.BotoServerError as e: - error = e.reason - - if e.error_code == 'AuthFailure': - error = self.get_auth_error_message() - if not e.reason == "Forbidden": - error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message - self.fail_with_error(error, 'getting ElastiCache clusters') - - try: - # Boto also doesn't provide wrapper classes to ReplicationGroups - # Because of that we can't make use of the get_list method in the - # AWSQueryConnection. Let's do the work manually - replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] - - except KeyError as e: - error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." 
- self.fail_with_error(error, 'getting ElastiCache clusters') - - for replication_group in replication_groups: - self.add_elasticache_replication_group(replication_group, region) - - def get_auth_error_message(self): - ''' create an informative error message if there is an issue authenticating''' - errors = ["Authentication error retrieving ec2 inventory."] - if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: - errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') - else: - errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') - - boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] - boto_config_found = [p for p in boto_paths if os.path.isfile(os.path.expanduser(p))] - if len(boto_config_found) > 0: - errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) - else: - errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) - - return '\n'.join(errors) - - def fail_with_error(self, err_msg, err_operation=None): - '''log an error to std err for ansible-playbook to consume and exit''' - if err_operation: - err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( - err_msg=err_msg, err_operation=err_operation) - sys.stderr.write(err_msg) - sys.exit(1) - - def get_instance(self, region, instance_id): - conn = self.connect(region) - - reservations = conn.get_all_instances([instance_id]) - for reservation in reservations: - for instance in reservation.instances: - return instance - - def add_instance(self, instance, region): - ''' Adds an instance to the inventory and index, as long as it is - addressable ''' - - # Only return instances with desired instance states - if instance.state not in self.ec2_instance_states: - return - - # Select the best destination address - # When destination_format and destination_format_tags are specified - # the following code will attempt to find the instance tags first, - # then the instance attributes next, and finally if neither are found - # assign nil for the desired destination format attribute. - if self.destination_format and self.destination_format_tags: - dest_vars = [] - inst_tags = getattr(instance, 'tags') - for tag in self.destination_format_tags: - if tag in inst_tags: - dest_vars.append(inst_tags[tag]) - elif hasattr(instance, tag): - dest_vars.append(getattr(instance, tag)) - else: - dest_vars.append('nil') - - dest = self.destination_format.format(*dest_vars) - elif instance.subnet_id: - dest = getattr(instance, self.vpc_destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) - else: - dest = getattr(instance, self.destination_variable, None) - if dest is None: - dest = getattr(instance, 'tags').get(self.destination_variable, None) - - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # Set the inventory name - hostname = None - if self.hostname_variable: - if self.hostname_variable.startswith('tag_'): - hostname = instance.tags.get(self.hostname_variable[4:], None) - else: - hostname = getattr(instance, self.hostname_variable) - - # set the hostname from route53 - if self.route53_enabled and self.route53_hostnames: - route53_names = self.get_instance_route53_names(instance) - for name in route53_names: - if name.endswith(self.route53_hostnames): - hostname = name - - # If we can't get a nice hostname, use the destination address - if not hostname: - hostname = dest - # to_safe strips hostname characters like dots, so don't strip route53 hostnames - elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames): - hostname = hostname.lower() - else: - hostname = self.to_safe(hostname).lower() - - # if we only want to include hosts that match a pattern, skip those that don't - if self.pattern_include and not self.pattern_include.match(hostname): - return - - # if we need to exclude hosts that match a pattern, skip those - if self.pattern_exclude and self.pattern_exclude.match(hostname): - return - - # Add to index - self.index[hostname] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [hostname] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.placement, hostname) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.placement) - self.push_group(self.inventory, 'zones', instance.placement) - - # Inventory: Group by Amazon Machine Image (AMI) ID - if self.group_by_ami_id: - ami_id = self.to_safe(instance.image_id) - self.push(self.inventory, ami_id, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'images', ami_id) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_type) - self.push(self.inventory, type_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by instance state - if self.group_by_instance_state: - state_name = self.to_safe('instance_state_' + instance.state) - self.push(self.inventory, state_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'instance_states', state_name) - - # Inventory: Group by platform - if self.group_by_platform: - if instance.platform: - platform = self.to_safe('platform_' + instance.platform) - else: - platform = self.to_safe('platform_undefined') - self.push(self.inventory, platform, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'platforms', platform) - - # Inventory: Group by key pair - if self.group_by_key_pair and instance.key_name: - key_name = self.to_safe('key_' + instance.key_name) - self.push(self.inventory, key_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'keys', key_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) - self.push(self.inventory, vpc_id_name, 
hostname) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - for group in instance.groups: - key = self.to_safe("security_group_" + group.name) - self.push(self.inventory, key, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - except AttributeError: - self.fail_with_error('\n'.join(['Package boto seems a bit older.', - 'Please upgrade boto >= 2.3.0.'])) - - # Inventory: Group by AWS account ID - if self.group_by_aws_account: - self.push(self.inventory, self.aws_account_id, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'accounts', self.aws_account_id) - - # Inventory: Group by tag keys - if self.group_by_tag_keys: - for k, v in instance.tags.items(): - if self.expand_csv_tags and v and ',' in v: - values = map(lambda x: x.strip(), v.split(',')) - else: - values = [v] - - for v in values: - if v: - key = self.to_safe("tag_" + k + "=" + v) - else: - key = self.to_safe("tag_" + k) - self.push(self.inventory, key, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - if v: - self.push_group(self.inventory, self.to_safe("tag_" + k), key) - - # Inventory: Group by Route53 domain names if enabled - if self.route53_enabled and self.group_by_route53_names: - route53_names = self.get_instance_route53_names(instance) - for name in route53_names: - self.push(self.inventory, name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'route53', name) - - # Global Tag: instances without tags - if self.group_by_tag_none and len(instance.tags) == 0: - self.push(self.inventory, 'tag_none', hostname) - if self.nested_groups: - self.push_group(self.inventory, 'tags', 'tag_none') - - # Global Tag: tag all EC2 instances - self.push(self.inventory, 'ec2', hostname) - - self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) - self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest - - def add_rds_instance(self, instance, region): - ''' Adds an RDS instance to the inventory and index, as long as it is - addressable ''' - - # Only want available instances unless all_rds_instances is True - if not self.all_rds_instances and instance.status != 'available': - return - - # Select the best destination address - dest = instance.endpoint[0] - - if not dest: - # Skip instances we cannot address (e.g. 
private VPC subnet) - return - - # Set the inventory name - hostname = None - if self.hostname_variable: - if self.hostname_variable.startswith('tag_'): - hostname = instance.tags.get(self.hostname_variable[4:], None) - else: - hostname = getattr(instance, self.hostname_variable) - - # If we can't get a nice hostname, use the destination address - if not hostname: - hostname = dest - - hostname = self.to_safe(hostname).lower() - - # Add to index - self.index[hostname] = [region, instance.id] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[instance.id] = [hostname] - if self.nested_groups: - self.push_group(self.inventory, 'instances', instance.id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, instance.availability_zone, hostname) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, instance.availability_zone) - self.push_group(self.inventory, 'zones', instance.availability_zone) - - # Inventory: Group by instance type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + instance.instance_class) - self.push(self.inventory, type_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC - if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: - vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) - self.push(self.inventory, vpc_id_name, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'vpcs', vpc_id_name) - - # Inventory: Group by security group - if self.group_by_security_group: - try: - if instance.security_group: - key = self.to_safe("security_group_" + instance.security_group.name) - self.push(self.inventory, key, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - except AttributeError: - self.fail_with_error('\n'.join(['Package boto seems a bit older.', - 'Please upgrade boto >= 2.3.0.'])) - # Inventory: Group by tag keys - if self.group_by_tag_keys: - for k, v in instance.tags.items(): - if self.expand_csv_tags and v and ',' in v: - values = map(lambda x: x.strip(), v.split(',')) - else: - values = [v] - - for v in values: - if v: - key = self.to_safe("tag_" + k + "=" + v) - else: - key = self.to_safe("tag_" + k) - self.push(self.inventory, key, hostname) - if self.nested_groups: - self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) - if v: - self.push_group(self.inventory, self.to_safe("tag_" + k), key) - - # Inventory: Group by engine - if self.group_by_rds_engine: - self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) - if self.nested_groups: - self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) - - # Inventory: Group by parameter group - if self.group_by_rds_parameter_group: - self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) - if self.nested_groups: - self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) - - # Global Tag: instances without tags - if self.group_by_tag_none and len(instance.tags) == 0: - self.push(self.inventory, 'tag_none', hostname) 
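The tag grouping applied a few lines above condenses into a small standalone sketch; the helper names and the sample tags below are illustrative reconstructions of the logic shown in this diff, not code from the collection.

import re

def to_safe(word, replace_dash_in_groups=True):
    # Mirrors the script's sanitizer: anything outside [A-Za-z0-9_] (and,
    # optionally, '-') becomes an underscore so it can be used as a group name.
    regex = r"[^A-Za-z0-9\_]" if replace_dash_in_groups else r"[^A-Za-z0-9\_\-]"
    return re.sub(regex, "_", word)

def tag_groups(tags, expand_csv_tags=True):
    # Build 'tag_<key>=<value>' group names for one instance, splitting
    # comma-separated values into separate groups when expand_csv_tags is set.
    groups = []
    for key, value in tags.items():
        if expand_csv_tags and value and ',' in value:
            values = [v.strip() for v in value.split(',')]
        else:
            values = [value]
        for v in values:
            groups.append(to_safe("tag_" + key + "=" + v if v else "tag_" + key))
    return groups

print(tag_groups({'Role': 'web,db'}))  # ['tag_Role_web', 'tag_Role_db']

Each resulting name is then fed through push() and push_group(), so it shows up both as a flat group and as a child of the 'tags' group when nested_groups is enabled.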
- if self.nested_groups: - self.push_group(self.inventory, 'tags', 'tag_none') - - # Global Tag: all RDS instances - self.push(self.inventory, 'rds', hostname) - - self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) - self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest - - def add_elasticache_cluster(self, cluster, region): - ''' Adds an ElastiCache cluster to the inventory and index, as long as - it's nodes are addressable ''' - - # Only want available clusters unless all_elasticache_clusters is True - if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': - return - - # Select the best destination address - if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: - # Memcached cluster - dest = cluster['ConfigurationEndpoint']['Address'] - is_redis = False - else: - # Redis sigle node cluster - # Because all Redis clusters are single nodes, we'll merge the - # info from the cluster with info about the node - dest = cluster['CacheNodes'][0]['Endpoint']['Address'] - is_redis = True - - if not dest: - # Skip clusters we cannot address (e.g. private VPC subnet) - return - - # Add to index - self.index[dest] = [region, cluster['CacheClusterId']] - - # Inventory: Group by instance ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[cluster['CacheClusterId']] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) - - # Inventory: Group by region - if self.group_by_region and not is_redis: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone and not is_redis: - self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) - self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) - - # Inventory: Group by node type - if self.group_by_instance_type and not is_redis: - type_name = self.to_safe('type_' + cluster['CacheNodeType']) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC (information not available in the current - # AWS API version for ElastiCache) - - # Inventory: Group by security group - if self.group_by_security_group and not is_redis: - - # Check for the existence of the 'SecurityGroups' key and also if - # this key has some value. When the cluster is not placed in a SG - # the query can return None here and cause an error. 
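The cluster-addressing rule above (configuration endpoint for Memcached, the single node's endpoint for Redis) reduces to a few lines; the dictionaries below are illustrative shapes of describe_cache_clusters output, not real clusters.

def elasticache_destination(cluster):
    # Memcached clusters expose a ConfigurationEndpoint; a single-node Redis
    # cluster only exposes the endpoint of its one cache node.
    if cluster.get('ConfigurationEndpoint'):
        return cluster['ConfigurationEndpoint']['Address'], False   # (dest, is_redis)
    return cluster['CacheNodes'][0]['Endpoint']['Address'], True

memcached = {'ConfigurationEndpoint': {'Address': 'cfg.example.cache.amazonaws.com', 'Port': 11211}}
redis = {'CacheNodes': [{'Endpoint': {'Address': 'node1.example.cache.amazonaws.com', 'Port': 6379}}]}
print(elasticache_destination(memcached))  # ('cfg.example.cache.amazonaws.com', False)
print(elasticache_destination(redis))      # ('node1.example.cache.amazonaws.com', True)

A falsy destination is skipped, the same way EC2 and RDS hosts in private VPC subnets are skipped elsewhere in the script.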
- if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: - for security_group in cluster['SecurityGroups']: - key = self.to_safe("security_group_" + security_group['SecurityGroupId']) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - # Inventory: Group by engine - if self.group_by_elasticache_engine and not is_redis: - self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) - - # Inventory: Group by parameter group - if self.group_by_elasticache_parameter_group: - self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) - - # Inventory: Group by replication group - if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: - self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) - - # Global Tag: all ElastiCache clusters - self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) - - host_info = self.get_host_info_dict_from_describe_dict(cluster) - - self.inventory["_meta"]["hostvars"][dest] = host_info - - # Add the nodes - for node in cluster['CacheNodes']: - self.add_elasticache_node(node, cluster, region) - - def add_elasticache_node(self, node, cluster, region): - ''' Adds an ElastiCache node to the inventory and index, as long as - it is addressable ''' - - # Only want available nodes unless all_elasticache_nodes is True - if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': - return - - # Select the best destination address - dest = node['Endpoint']['Address'] - - if not dest: - # Skip nodes we cannot address (e.g. 
private VPC subnet) - return - - node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) - - # Add to index - self.index[dest] = [region, node_id] - - # Inventory: Group by node ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[node_id] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', node_id) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone - if self.group_by_availability_zone: - self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) - if self.nested_groups: - if self.group_by_region: - self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) - self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) - - # Inventory: Group by node type - if self.group_by_instance_type: - type_name = self.to_safe('type_' + cluster['CacheNodeType']) - self.push(self.inventory, type_name, dest) - if self.nested_groups: - self.push_group(self.inventory, 'types', type_name) - - # Inventory: Group by VPC (information not available in the current - # AWS API version for ElastiCache) - - # Inventory: Group by security group - if self.group_by_security_group: - - # Check for the existence of the 'SecurityGroups' key and also if - # this key has some value. When the cluster is not placed in a SG - # the query can return None here and cause an error. - if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: - for security_group in cluster['SecurityGroups']: - key = self.to_safe("security_group_" + security_group['SecurityGroupId']) - self.push(self.inventory, key, dest) - if self.nested_groups: - self.push_group(self.inventory, 'security_groups', key) - - # Inventory: Group by engine - if self.group_by_elasticache_engine: - self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) - - # Inventory: Group by parameter group (done at cluster level) - - # Inventory: Group by replication group (done at cluster level) - - # Inventory: Group by ElastiCache Cluster - if self.group_by_elasticache_cluster: - self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) - - # Global Tag: all ElastiCache nodes - self.push(self.inventory, 'elasticache_nodes', dest) - - host_info = self.get_host_info_dict_from_describe_dict(node) - - if dest in self.inventory["_meta"]["hostvars"]: - self.inventory["_meta"]["hostvars"][dest].update(host_info) - else: - self.inventory["_meta"]["hostvars"][dest] = host_info - - def add_elasticache_replication_group(self, replication_group, region): - ''' Adds an ElastiCache replication group to the inventory and index ''' - - # Only want available clusters unless all_elasticache_replication_groups is True - if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': - return - - # Skip clusters we cannot address (e.g. 
private VPC subnet or clustered redis) - if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \ - replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None: - return - - # Select the best destination address (PrimaryEndpoint) - dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] - - # Add to index - self.index[dest] = [region, replication_group['ReplicationGroupId']] - - # Inventory: Group by ID (always a group of 1) - if self.group_by_instance_id: - self.inventory[replication_group['ReplicationGroupId']] = [dest] - if self.nested_groups: - self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) - - # Inventory: Group by region - if self.group_by_region: - self.push(self.inventory, region, dest) - if self.nested_groups: - self.push_group(self.inventory, 'regions', region) - - # Inventory: Group by availability zone (doesn't apply to replication groups) - - # Inventory: Group by node type (doesn't apply to replication groups) - - # Inventory: Group by VPC (information not available in the current - # AWS API version for replication groups - - # Inventory: Group by security group (doesn't apply to replication groups) - # Check this value in cluster level - - # Inventory: Group by engine (replication groups are always Redis) - if self.group_by_elasticache_engine: - self.push(self.inventory, 'elasticache_redis', dest) - if self.nested_groups: - self.push_group(self.inventory, 'elasticache_engines', 'redis') - - # Global Tag: all ElastiCache clusters - self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) - - host_info = self.get_host_info_dict_from_describe_dict(replication_group) - - self.inventory["_meta"]["hostvars"][dest] = host_info - - def get_route53_records(self): - ''' Get and store the map of resource records to domain names that - point to them. ''' - - if self.boto_profile: - r53_conn = route53.Route53Connection(profile_name=self.boto_profile) - else: - r53_conn = route53.Route53Connection() - all_zones = r53_conn.get_zones() - - route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones] - - self.route53_records = {} - - for zone in route53_zones: - rrsets = r53_conn.get_all_rrsets(zone.id) - - for record_set in rrsets: - record_name = record_set.name - - if record_name.endswith('.'): - record_name = record_name[:-1] - - for resource in record_set.resource_records: - self.route53_records.setdefault(resource, set()) - self.route53_records[resource].add(record_name) - - def get_instance_route53_names(self, instance): - ''' Check if an instance is referenced in the records we have from - Route53. If it is, return the list of domain names pointing to said - instance. If nothing points to it, return an empty list. 
''' - - instance_attributes = ['public_dns_name', 'private_dns_name', - 'ip_address', 'private_ip_address'] - - name_list = set() - - for attrib in instance_attributes: - try: - value = getattr(instance, attrib) - except AttributeError: - continue - - if value in self.route53_records: - name_list.update(self.route53_records[value]) - - return list(name_list) - - def get_host_info_dict_from_instance(self, instance): - instance_vars = {} - for key in vars(instance): - value = getattr(instance, key) - key = self.to_safe('ec2_' + key) - - # Handle complex types - # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 - if key == 'ec2__state': - instance_vars['ec2_state'] = instance.state or '' - instance_vars['ec2_state_code'] = instance.state_code - elif key == 'ec2__previous_state': - instance_vars['ec2_previous_state'] = instance.previous_state or '' - instance_vars['ec2_previous_state_code'] = instance.previous_state_code - elif isinstance(value, (int, bool)): - instance_vars[key] = value - elif isinstance(value, six.string_types): - instance_vars[key] = value.strip() - elif value is None: - instance_vars[key] = '' - elif key == 'ec2_region': - instance_vars[key] = value.name - elif key == 'ec2__placement': - instance_vars['ec2_placement'] = value.zone - elif key == 'ec2_tags': - for k, v in value.items(): - if self.expand_csv_tags and ',' in v: - v = list(map(lambda x: x.strip(), v.split(','))) - key = self.to_safe('ec2_tag_' + k) - instance_vars[key] = v - elif key == 'ec2_groups': - group_ids = [] - group_names = [] - for group in value: - group_ids.append(group.id) - group_names.append(group.name) - instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) - instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) - elif key == 'ec2_block_device_mapping': - instance_vars["ec2_block_devices"] = {} - for k, v in value.items(): - instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id - else: - pass - # TODO Product codes if someone finds them useful - # print key - # print type(value) - # print value - - instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id - - return instance_vars - - def get_host_info_dict_from_describe_dict(self, describe_dict): - ''' Parses the dictionary returned by the API call into a flat list - of parameters. This method should be used only when 'describe' is - used directly because Boto doesn't provide specific classes. ''' - - # I really don't agree with prefixing everything with 'ec2' - # because EC2, RDS and ElastiCache are different services. - # I'm just following the pattern used until now to not break any - # compatibility. 
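The Route 53 handling just above works in two steps: build a reverse map from resource records to the record names that point at them, then check each instance address against that map. A plain-dict sketch of the same idea follows; the made-up dictionaries stand in for the boto zone and record-set objects, and the example.com data is illustrative.

def build_route53_map(record_sets):
    # Map each resource record (an IP or DNS target) to the set of record
    # names that reference it; trailing dots are stripped as in the original.
    records = {}
    for record_set in record_sets:
        name = record_set['name'].rstrip('.')
        for resource in record_set['resources']:
            records.setdefault(resource, set()).add(name)
    return records

def instance_route53_names(instance, records):
    # Any of the instance's public/private DNS names or IP addresses may be
    # the target of a record.
    names = set()
    for attrib in ('public_dns_name', 'private_dns_name', 'ip_address', 'private_ip_address'):
        value = instance.get(attrib)
        if value in records:
            names.update(records[value])
    return sorted(names)

sample_sets = [{'name': 'web.example.com.', 'resources': ['203.0.113.10']}]
sample_instance = {'ip_address': '203.0.113.10'}
print(instance_route53_names(sample_instance, build_route53_map(sample_sets)))  # ['web.example.com']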
- - host_info = {} - for key in describe_dict: - value = describe_dict[key] - key = self.to_safe('ec2_' + self.uncammelize(key)) - - # Handle complex types - - # Target: Memcached Cache Clusters - if key == 'ec2_configuration_endpoint' and value: - host_info['ec2_configuration_endpoint_address'] = value['Address'] - host_info['ec2_configuration_endpoint_port'] = value['Port'] - - # Target: Cache Nodes and Redis Cache Clusters (single node) - if key == 'ec2_endpoint' and value: - host_info['ec2_endpoint_address'] = value['Address'] - host_info['ec2_endpoint_port'] = value['Port'] - - # Target: Redis Replication Groups - if key == 'ec2_node_groups' and value: - host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] - host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] - replica_count = 0 - for node in value[0]['NodeGroupMembers']: - if node['CurrentRole'] == 'primary': - host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] - host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] - host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] - elif node['CurrentRole'] == 'replica': - host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address'] - host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port'] - host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId'] - replica_count += 1 - - # Target: Redis Replication Groups - if key == 'ec2_member_clusters' and value: - host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) - - # Target: All Cache Clusters - elif key == 'ec2_cache_parameter_group': - host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) - host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] - host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] - - # Target: Almost everything - elif key == 'ec2_security_groups': - - # Skip if SecurityGroups is None - # (it is possible to have the key defined but no value in it). 
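Most of the flattening this helper performs is key renaming: CamelCase keys from describe calls become ec2_-prefixed snake_case host variables using the script's uncammelize() helper defined further below. A condensed sketch with made-up sample keys:

import re

def uncamelize(key):
    # Same two-pass conversion as the script's uncammelize() helper:
    # 'CacheClusterId' -> 'cache_cluster_id'.
    temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

def flatten_describe_keys(describe_dict):
    # Prefix every key with 'ec2_' after snake-casing it, as the hostvars
    # builder does for RDS and ElastiCache describe output.
    return {'ec2_' + uncamelize(k): v for k, v in describe_dict.items()}

print(flatten_describe_keys({'CacheClusterId': 'demo', 'NumCacheNodes': 1}))
# {'ec2_cache_cluster_id': 'demo', 'ec2_num_cache_nodes': 1}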
- if value is not None: - sg_ids = [] - for sg in value: - sg_ids.append(sg['SecurityGroupId']) - host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) - - # Target: Everything - # Preserve booleans and integers - elif isinstance(value, (int, bool)): - host_info[key] = value - - # Target: Everything - # Sanitize string values - elif isinstance(value, six.string_types): - host_info[key] = value.strip() - - # Target: Everything - # Replace None by an empty string - elif value is None: - host_info[key] = '' - - else: - # Remove non-processed complex types - pass - - return host_info - - def get_host_info(self): - ''' Get variables about a specific host ''' - - if len(self.index) == 0: - # Need to load index from cache - self.load_index_from_cache() - - if self.args.host not in self.index: - # try updating the cache - self.do_api_calls_update_cache() - if self.args.host not in self.index: - # host might not exist anymore - return self.json_format_dict({}, True) - - (region, instance_id) = self.index[self.args.host] - - instance = self.get_instance(region, instance_id) - return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) - - def push(self, my_dict, key, element): - ''' Push an element onto an array that may not have been defined in - the dict ''' - group_info = my_dict.setdefault(key, []) - if isinstance(group_info, dict): - host_list = group_info.setdefault('hosts', []) - host_list.append(element) - else: - group_info.append(element) - - def push_group(self, my_dict, key, element): - ''' Push a group as a child of another group. ''' - parent_group = my_dict.setdefault(key, {}) - if not isinstance(parent_group, dict): - parent_group = my_dict[key] = {'hosts': parent_group} - child_groups = parent_group.setdefault('children', []) - if element not in child_groups: - child_groups.append(element) - - def get_inventory_from_cache(self): - ''' Reads the inventory from the cache file and returns it as a JSON - object ''' - - with open(self.cache_path_cache, 'r') as f: - json_inventory = f.read() - return json_inventory - - def load_index_from_cache(self): - ''' Reads the index from the cache file sets self.index ''' - - with open(self.cache_path_index, 'rb') as f: - self.index = json.load(f) - - def write_to_cache(self, data, filename): - ''' Writes data in JSON format to a file ''' - - json_data = self.json_format_dict(data, True) - with open(filename, 'w') as f: - f.write(json_data) - - def uncammelize(self, key): - temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) - return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - regex = r"[^A-Za-z0-9\_" - if not self.replace_dash_in_groups: - regex += r"\-" - return re.sub(regex + "]", "_", word) - - def json_format_dict(self, data, pretty=False): - ''' Converts a dict to a JSON object and dumps it as a formatted - string ''' - - if pretty: - return json.dumps(data, sort_keys=True, indent=2, default=self._json_serial) - else: - return json.dumps(data, default=self._json_serial) - - -if __name__ == '__main__': - # Run the script - Ec2Inventory() diff --git a/test-requirements.txt b/test-requirements.txt index 41566c53e71..b3005cf8205 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,6 +1,5 @@ botocore boto3 -boto python-dateutil # Used by ec2_asg_scheduled_action diff --git a/tests/integration/requirements.txt b/tests/integration/requirements.txt index 
387e8069351..352e8b7ff0f 100644 --- a/tests/integration/requirements.txt +++ b/tests/integration/requirements.txt @@ -1,5 +1,4 @@ # Our code is based on the AWS SDKs -boto boto3 botocore diff --git a/tests/integration/targets/script_inventory_ec2/aliases b/tests/integration/targets/script_inventory_ec2/aliases deleted file mode 100644 index 87228f92579..00000000000 --- a/tests/integration/targets/script_inventory_ec2/aliases +++ /dev/null @@ -1,3 +0,0 @@ -needs/file/scripts/inventory/ec2.py -# Unsupported, runme.sh is currently broken, and script is deprecated https://github.com/ansible-collections/community.aws/pull/596 -disabled diff --git a/tests/integration/targets/script_inventory_ec2/ec2.sh b/tests/integration/targets/script_inventory_ec2/ec2.sh deleted file mode 100644 index 9c6c127eb4f..00000000000 --- a/tests/integration/targets/script_inventory_ec2/ec2.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash -# Wrapper to use the correct Python interpreter and support code coverage. -ABS_SCRIPT=$(python -c "import os; print(os.path.abspath('../../../../scripts/inventory/ec2.py'))") -cd "${OUTPUT_DIR}" -python.py "${ABS_SCRIPT}" "$@" diff --git a/tests/integration/targets/script_inventory_ec2/inventory_diff.py b/tests/integration/targets/script_inventory_ec2/inventory_diff.py deleted file mode 100644 index 3aaeff50b43..00000000000 --- a/tests/integration/targets/script_inventory_ec2/inventory_diff.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import json -import sys - - -def check_hosts(contrib, plugin): - contrib_hosts = sorted(contrib['_meta']['hostvars'].keys()) - plugin_hosts = sorted(plugin['_meta']['hostvars'].keys()) - assert contrib_hosts == plugin_hosts - return contrib_hosts, plugin_hosts - - -def check_groups(contrib, plugin): - contrib_groups = set(contrib.keys()) - plugin_groups = set(plugin.keys()) - missing_groups = contrib_groups.difference(plugin_groups) - if missing_groups: - print("groups: %s are missing from the plugin" % missing_groups) - assert not missing_groups - return contrib_groups, plugin_groups - - -def check_host_vars(key, value, plugin, host): - # tags are a dict in the plugin - if key.startswith('ec2_tag'): - print('assert tag', key, value) - assert 'tags' in plugin['_meta']['hostvars'][host], 'b file does not have tags in host' - btags = plugin['_meta']['hostvars'][host]['tags'] - tagkey = key.replace('ec2_tag_', '') - assert tagkey in btags, '%s tag not in b file host tags' % tagkey - assert value == btags[tagkey], '%s != %s' % (value, btags[tagkey]) - else: - print('assert var', key, value, key in plugin['_meta']['hostvars'][host], plugin['_meta']['hostvars'][host].get(key)) - assert key in plugin['_meta']['hostvars'][host], "%s not in b's %s hostvars" % (key, host) - assert value == plugin['_meta']['hostvars'][host][key], "%s != %s" % (value, plugin['_meta']['hostvars'][host][key]) - - -def main(): - # a should be the source of truth (the script output) - a = sys.argv[1] - # b should be the thing to check (the plugin output) - b = sys.argv[2] - - with open(a, 'r') as f: - adata = json.loads(f.read()) - with open(b, 'r') as f: - bdata = json.loads(f.read()) - - # all hosts should be present obviously - ahosts, bhosts = check_hosts(adata, bdata) - - # all groups should be present obviously - agroups, bgroups = check_groups(adata, bdata) - - # check host vars can be reconstructed - for ahost in ahosts: - contrib_host_vars = 
adata['_meta']['hostvars'][ahost] - for key, value in contrib_host_vars.items(): - check_host_vars(key, value, bdata, ahost) - - -if __name__ == "__main__": - main() diff --git a/tests/integration/targets/script_inventory_ec2/runme.sh b/tests/integration/targets/script_inventory_ec2/runme.sh deleted file mode 100755 index 05772955892..00000000000 --- a/tests/integration/targets/script_inventory_ec2/runme.sh +++ /dev/null @@ -1,151 +0,0 @@ -#!/usr/bin/env bash - -set -eux - -source virtualenv.sh - -pip install "python-dateutil>=2.1,<2.7.0" jmespath "Jinja2==2.10" - -# create boto3 symlinks -ln -s "$(pwd)/lib/boto" "$(pwd)/lib/boto3" -ln -s "$(pwd)/lib/boto" "$(pwd)/lib/botocore" - -# override boto's import path(s) -export PYTHONPATH -PYTHONPATH="$(pwd)/lib:$PYTHONPATH" - -################################################# -# RUN THE SCRIPT -################################################# - -# run the script first -cat << EOF > "$OUTPUT_DIR/ec2.ini" -[ec2] -regions = us-east-1 -cache_path = $(pwd)/.cache -cache_max_age = 0 -group_by_tag_none = False - -[credentials] -aws_access_key_id = FOO -aws_secret_acccess_key = BAR -EOF - -ANSIBLE_JINJA2_NATIVE=1 ansible-inventory -vvvv -i ./ec2.sh --list --output="$OUTPUT_DIR/script.out" -RC=$? -if [[ $RC != 0 ]]; then - exit $RC -fi - -################################################# -# RUN THE PLUGIN -################################################# - -# run the plugin second -export ANSIBLE_INVENTORY_ENABLED=aws_ec2 -export ANSIBLE_INVENTORY=test.aws_ec2.yml -export AWS_ACCESS_KEY_ID=FOO -export AWS_SECRET_ACCESS_KEY=BAR -export ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=never - -cat << EOF > "$OUTPUT_DIR/test.aws_ec2.yml" -plugin: aws_ec2 -cache: False -use_contrib_script_compatible_sanitization: True -strict: True -regions: - - us-east-1 -hostnames: - - network-interface.addresses.association.public-ip - - dns-name -filters: - instance-state-name: running -compose: - # vars that don't exist anymore in any meaningful way - ec2_item: undefined | default("") - ec2_monitoring: undefined | default("") - ec2_previous_state: undefined | default("") - ec2_previous_state_code: undefined | default(0) - ec2__in_monitoring_element: undefined | default(false) - # the following three will be accessible again after #53645 - ec2_requester_id: undefined | default("") - ec2_eventsSet: undefined | default("") - ec2_persistent: undefined | default(false) - - # vars that change - ansible_host: public_ip_address - ec2_block_devices: dict(block_device_mappings | map(attribute='device_name') | map('basename') | list | zip(block_device_mappings | map(attribute='ebs.volume_id') | list)) - ec2_dns_name: public_dns_name - ec2_group_name: placement['group_name'] - ec2_id: instance_id - ec2_instance_profile: iam_instance_profile | default("") - ec2_ip_address: public_ip_address - ec2_kernel: kernel_id | default("") - ec2_monitored: monitoring['state'] in ['enabled', 'pending'] - ec2_monitoring_state: monitoring['state'] - ec2_account_id: owner_id - ec2_placement: placement['availability_zone'] - ec2_ramdisk: ramdisk_id | default("") - ec2_reason: state_transition_reason - ec2_security_group_ids: security_groups | map(attribute='group_id') | list | sort | join(',') - ec2_security_group_names: security_groups | map(attribute='group_name') | list | sort | join(',') - ec2_state: state['name'] - ec2_state_code: state['code'] - ec2_state_reason: state_reason['message'] if state_reason is defined else "" - ec2_sourceDestCheck: source_dest_check | lower | string # butchered snake_case 
case not a typo. - - # vars that just need ec2_ prefix - ec2_ami_launch_index: ami_launch_index | string - ec2_architecture: architecture - ec2_client_token: client_token - ec2_ebs_optimized: ebs_optimized - ec2_hypervisor: hypervisor - ec2_image_id: image_id - ec2_instance_type: instance_type - ec2_key_name: key_name - ec2_launch_time: 'launch_time | regex_replace(" ", "T") | regex_replace("(\+)(\d\d):(\d)(\d)$", ".\g<2>\g<3>Z")' - ec2_platform: platform | default("") - ec2_private_dns_name: private_dns_name - ec2_private_ip_address: private_ip_address - ec2_public_dns_name: public_dns_name - ec2_region: placement['region'] - ec2_root_device_name: root_device_name - ec2_root_device_type: root_device_type - ec2_spot_instance_request_id: spot_instance_request_id | default("") - ec2_subnet_id: subnet_id - ec2_virtualization_type: virtualization_type - ec2_vpc_id: vpc_id - tags: dict(tags.keys() | map('regex_replace', '[^A-Za-z0-9\_]', '_') | list | zip(tags.values() | list)) - -keyed_groups: - - key: '"ec2"' - separator: "" - - key: 'instance_id' - separator: "" - - key: tags - prefix: tag - - key: key_name | regex_replace('-', '_') - prefix: key - - key: placement['region'] - separator: "" - - key: placement['availability_zone'] - separator: "" - - key: platform | default('undefined') - prefix: platform - - key: vpc_id | regex_replace('-', '_') - prefix: vpc_id - - key: instance_type - prefix: type - - key: "image_id | regex_replace('-', '_')" - separator: "" - - key: security_groups | map(attribute='group_name') | map("regex_replace", "-", "_") | list - prefix: security_group -EOF - -ANSIBLE_JINJA2_NATIVE=1 ansible-inventory -vvvv -i "$OUTPUT_DIR/test.aws_ec2.yml" --list --output="$OUTPUT_DIR/plugin.out" - -################################################# -# DIFF THE RESULTS -################################################# - -./inventory_diff.py "$OUTPUT_DIR/script.out" "$OUTPUT_DIR/plugin.out" diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt index 4c5b1ce9282..f4f3ad27f44 100644 --- a/tests/unit/requirements.txt +++ b/tests/unit/requirements.txt @@ -1,7 +1,6 @@ # Our code is based on the AWS SDKs botocore boto3 -boto placebo cryptography From 3e30e37d7188e75a3ef19173e6a476742ce21e8e Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 1 Feb 2022 23:06:06 +0100 Subject: [PATCH 13/31] Fix unit tests which broke due to things being moved about in amazon.aws (#914) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix unit tests which broke due to things being moved about in amazon.aws Depends-On: ansible/ansible-zuul-jobs#1324 SUMMARY ansible-collections/amazon.aws#649 moved a few things around, including a "private" wrapper class. 
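The Python rule at play is that a from-import binds a name inside the importing module, so a monkeypatch has to target whichever namespace the code under test actually reads. A minimal illustration with throwaway module names (helpers, consumer), not the real amazon.aws modules:

import types

# 'helpers' defines a function; 'consumer' does "from helpers import connect",
# which copies the reference into its own namespace.
helpers = types.ModuleType('helpers')
helpers.connect = lambda: 'real connection'
consumer = types.ModuleType('consumer')
consumer.connect = helpers.connect

# Re-pointing the name in the defining module does not change what the
# consumer already holds ...
helpers.connect = lambda: 'patched at the source'
print(consumer.connect())   # -> 'real connection'

# ... so the patch has to be applied to the namespace that performed the import.
consumer.connect = lambda: 'patched at the import site'
print(consumer.connect())   # -> 'patched at the import site'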
The shuffles broke part of the monkey patching in some unit tests, and the private wrapper class wasn't made available in the old location (it's private, using it might result in things breaking) ISSUE TYPE Bugfix Pull Request COMPONENT NAME tests/unit/plugins/modules/test_aws_api_gateway.py tests/unit/plugins/modules/test_ec2_vpc_vpn.py ADDITIONAL INFORMATION CC @marknet15 Reviewed-by: Gonéri Le Bouder --- tests/unit/plugins/modules/test_aws_api_gateway.py | 10 ++++++---- tests/unit/plugins/modules/test_ec2_vpc_vpn.py | 8 ++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/tests/unit/plugins/modules/test_aws_api_gateway.py b/tests/unit/plugins/modules/test_aws_api_gateway.py index fd36029de46..ced1db82632 100644 --- a/tests/unit/plugins/modules/test_aws_api_gateway.py +++ b/tests/unit/plugins/modules/test_aws_api_gateway.py @@ -11,8 +11,8 @@ import sys import pytest -from ansible_collections.amazon.aws.plugins.module_utils import core -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils import modules as aws_modules +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 from ansible_collections.community.aws.tests.unit.plugins.modules.utils import set_module_args import ansible_collections.community.aws.plugins.modules.aws_api_gateway as agw @@ -41,8 +41,10 @@ def put_rest_api(self, *args, **kwargs): def return_fake_connection(*args, **kwargs): return FakeConnection() - monkeypatch.setattr(core, "boto3_conn", return_fake_connection) - monkeypatch.setattr(core.AnsibleAWSModule, "exit_json", fake_exit_json) + # Because it's imported into the aws_modules namespace we need to override + # it there, even though the function itself lives in module_utils.botocore + monkeypatch.setattr(aws_modules, "boto3_conn", return_fake_connection) + monkeypatch.setattr(aws_modules.AnsibleAWSModule, "exit_json", fake_exit_json) set_module_args({ "api_id": "fred", diff --git a/tests/unit/plugins/modules/test_ec2_vpc_vpn.py b/tests/unit/plugins/modules/test_ec2_vpc_vpn.py index 49ef596a5b1..19fce6a4da8 100644 --- a/tests/unit/plugins/modules/test_ec2_vpc_vpn.py +++ b/tests/unit/plugins/modules/test_ec2_vpc_vpn.py @@ -11,8 +11,8 @@ from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep # pylint: disable=unused-import -import ansible_collections.amazon.aws.plugins.module_utils.core as aws_core -import ansible_collections.amazon.aws.plugins.module_utils.ec2 as aws_ec2 +import ansible_collections.amazon.aws.plugins.module_utils.modules as aws_modules +import ansible_collections.amazon.aws.plugins.module_utils.retries as aws_retries from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict @@ -82,8 +82,8 @@ def get_dependencies(): def setup_mod_conn(placeboify, params): conn = placeboify.client('ec2') - retry_decorator = aws_ec2.AWSRetry.jittered_backoff() - wrapped_conn = aws_core._RetryingBotoClientWrapper(conn, retry_decorator) + retry_decorator = aws_retries.AWSRetry.jittered_backoff() + wrapped_conn = aws_modules._RetryingBotoClientWrapper(conn, retry_decorator) m = FakeModule(**params) return m, 
wrapped_conn From 5e09149bf273d24c7a1238e82fb22e838c4b29df Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Wed, 2 Feb 2022 11:41:14 +0100 Subject: [PATCH 14/31] Move some shared logic in module utils/sns (#879) Move some shared logic in module utils/sns SUMMARY Move some shared logic in module utils/sns ISSUE TYPE Feature Pull Request COMPONENT NAME sns_topic sns sns_topic_info Reviewed-by: Mark Chappell Reviewed-by: Mark Woolley Reviewed-by: Markus Bergholz --- plugins/module_utils/sns.py | 125 ++++++++++++++++++ plugins/modules/sns.py | 19 +-- plugins/modules/sns_topic.py | 117 +++------------- .../targets/sns_topic/tasks/main.yml | 4 +- 4 files changed, 144 insertions(+), 121 deletions(-) create mode 100644 plugins/module_utils/sns.py diff --git a/plugins/module_utils/sns.py b/plugins/module_utils/sns.py new file mode 100644 index 00000000000..27ab8773531 --- /dev/null +++ b/plugins/module_utils/sns.py @@ -0,0 +1,125 @@ +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re +import copy + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict + + +@AWSRetry.jittered_backoff() +def _list_topics_with_backoff(client): + paginator = client.get_paginator('list_topics') + return paginator.paginate().build_full_result()['Topics'] + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) +def _list_topic_subscriptions_with_backoff(client, topic_arn): + paginator = client.get_paginator('list_subscriptions_by_topic') + return paginator.paginate(TopicArn=topic_arn).build_full_result()['Subscriptions'] + + +@AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) +def _list_subscriptions_with_backoff(client): + paginator = client.get_paginator('list_subscriptions') + return paginator.paginate().build_full_result()['Subscriptions'] + + +def list_topic_subscriptions(client, module, topic_arn): + try: + return _list_topic_subscriptions_with_backoff(client, topic_arn) + except is_boto3_error_code('AuthorizationError'): + try: + # potentially AuthorizationError when listing subscriptions for third party topic + return [sub for sub in _list_subscriptions_with_backoff(client) + if sub['TopicArn'] == topic_arn] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn) + + +def list_topics(client, module): + try: + topics = _list_topics_with_backoff(client) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't get topic list") + return [t['TopicArn'] for t in topics] + + +def topic_arn_lookup(client, module, name): + # topic names cannot have colons, so this captures the full topic name + all_topics = list_topics(client, module) + lookup_topic = ':%s' % name + for topic in all_topics: + if topic.endswith(lookup_topic): + return topic + + +def compare_delivery_policies(policy_a, policy_b): + _policy_a = copy.deepcopy(policy_a) + _policy_b = copy.deepcopy(policy_b) 
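The ARN lookup above relies on topic names never containing ':', which makes matching on the ':<name>' suffix of each ARN unambiguous. A self-contained illustration with a made-up account id:

def arn_matches_name(topic_arn, name):
    # Topic names cannot contain ':', so the final ARN segment is the name.
    return topic_arn.endswith(':' + name)

arns = ['arn:aws:sns:us-east-1:123456789012:alerts',
        'arn:aws:sns:us-east-1:123456789012:alerts-dead-letter']
print([arn for arn in arns if arn_matches_name(arn, 'alerts')])
# ['arn:aws:sns:us-east-1:123456789012:alerts']

Note that the suffix match does not falsely pick up topics whose names merely start with the requested name, such as 'alerts-dead-letter' above.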
+ # AWS automatically injects disableSubscriptionOverrides if you set an + # http policy + if 'http' in policy_a: + if 'disableSubscriptionOverrides' not in policy_a['http']: + _policy_a['http']['disableSubscriptionOverrides'] = False + if 'http' in policy_b: + if 'disableSubscriptionOverrides' not in policy_b['http']: + _policy_b['http']['disableSubscriptionOverrides'] = False + comparison = (_policy_a != _policy_b) + return comparison + + +def canonicalize_endpoint(protocol, endpoint): + # AWS SNS expects phone numbers in + # and canonicalizes to E.164 format + # See + if protocol == 'sms': + return re.sub('[^0-9+]*', '', endpoint) + return endpoint + + +def get_info(connection, module, topic_arn): + name = module.params.get('name') + topic_type = module.params.get('topic_type') + state = module.params.get('state') + subscriptions = module.params.get('subscriptions') + purge_subscriptions = module.params.get('purge_subscriptions') + subscriptions_existing = module.params.get('subscriptions_existing', []) + subscriptions_deleted = module.params.get('subscriptions_deleted', []) + subscriptions_added = module.params.get('subscriptions_added', []) + subscriptions_added = module.params.get('subscriptions_added', []) + topic_created = module.params.get('topic_created', False) + topic_deleted = module.params.get('topic_deleted', False) + attributes_set = module.params.get('attributes_set', []) + check_mode = module.check_mode + + info = { + 'name': name, + 'topic_type': topic_type, + 'state': state, + 'subscriptions_new': subscriptions, + 'subscriptions_existing': subscriptions_existing, + 'subscriptions_deleted': subscriptions_deleted, + 'subscriptions_added': subscriptions_added, + 'subscriptions_purge': purge_subscriptions, + 'check_mode': check_mode, + 'topic_created': topic_created, + 'topic_deleted': topic_deleted, + 'attributes_set': attributes_set, + } + if state != 'absent': + if topic_arn in list_topics(connection, module): + info.update(camel_dict_to_snake_dict(connection.get_topic_attributes(TopicArn=topic_arn)['Attributes'])) + info['delivery_policy'] = info.pop('effective_delivery_policy') + info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in list_topic_subscriptions(connection, module, topic_arn)] + + return info diff --git a/plugins/modules/sns.py b/plugins/modules/sns.py index a18c3279173..fc400bac5e0 100644 --- a/plugins/modules/sns.py +++ b/plugins/modules/sns.py @@ -134,22 +134,7 @@ pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule - - -def arn_topic_lookup(module, client, short_topic): - lookup_topic = ':{0}'.format(short_topic) - - try: - paginator = client.get_paginator('list_topics') - topic_iterator = paginator.paginate() - for response in topic_iterator: - for topic in response['Topics']: - if topic['TopicArn'].endswith(lookup_topic): - return topic['TopicArn'] - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to look up topic ARN') - - return None +from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup def main(): @@ -205,7 +190,7 @@ def main(): # Short names can't contain ':' so we'll assume this is the full ARN sns_kwargs['TopicArn'] = topic else: - sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic) + sns_kwargs['TopicArn'] = topic_arn_lookup(client, module, topic) if not sns_kwargs['TopicArn']: module.fail_json(msg='Could not find topic: {0}'.format(topic)) diff --git a/plugins/modules/sns_topic.py 
b/plugins/modules/sns_topic.py index 37cf573ce58..817729c33e8 100644 --- a/plugins/modules/sns_topic.py +++ b/plugins/modules/sns_topic.py @@ -284,8 +284,6 @@ ''' import json -import re -import copy try: import botocore @@ -293,11 +291,14 @@ pass # handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.community.aws.plugins.module_utils.sns import list_topics +from ansible_collections.community.aws.plugins.module_utils.sns import topic_arn_lookup +from ansible_collections.community.aws.plugins.module_utils.sns import compare_delivery_policies +from ansible_collections.community.aws.plugins.module_utils.sns import list_topic_subscriptions +from ansible_collections.community.aws.plugins.module_utils.sns import canonicalize_endpoint +from ansible_collections.community.aws.plugins.module_utils.sns import get_info class SnsTopicManager(object): @@ -334,36 +335,6 @@ def __init__(self, self.topic_arn = None self.attributes_set = [] - @AWSRetry.jittered_backoff() - def _list_topics_with_backoff(self): - paginator = self.connection.get_paginator('list_topics') - return paginator.paginate().build_full_result()['Topics'] - - @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) - def _list_topic_subscriptions_with_backoff(self): - paginator = self.connection.get_paginator('list_subscriptions_by_topic') - return paginator.paginate(TopicArn=self.topic_arn).build_full_result()['Subscriptions'] - - @AWSRetry.jittered_backoff(catch_extra_error_codes=['NotFound']) - def _list_subscriptions_with_backoff(self): - paginator = self.connection.get_paginator('list_subscriptions') - return paginator.paginate().build_full_result()['Subscriptions'] - - def _list_topics(self): - try: - topics = self._list_topics_with_backoff() - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get topic list") - return [t['TopicArn'] for t in topics] - - def _topic_arn_lookup(self): - # topic names cannot have colons, so this captures the full topic name - all_topics = self._list_topics() - lookup_topic = ':%s' % self.name - for topic in all_topics: - if topic.endswith(lookup_topic): - return topic - def _create_topic(self): attributes = {'FifoTopic': 'false'} tags = [] @@ -381,20 +352,6 @@ def _create_topic(self): self.topic_arn = response['TopicArn'] return True - def _compare_delivery_policies(self, policy_a, policy_b): - _policy_a = copy.deepcopy(policy_a) - _policy_b = copy.deepcopy(policy_b) - # AWS automatically injects disableSubscriptionOverrides if you set an - # http policy - if 'http' in policy_a: - if 'disableSubscriptionOverrides' not in policy_a['http']: - _policy_a['http']['disableSubscriptionOverrides'] = False - if 'http' in policy_b: - if 'disableSubscriptionOverrides' not in policy_b['http']: - _policy_b['http']['disableSubscriptionOverrides'] = False - comparison = (_policy_a != _policy_b) - return comparison - def _set_topic_attrs(self): changed = False try: @@ -423,7 +380,7 @@ def _set_topic_attrs(self): 
self.module.fail_json_aws(e, msg="Couldn't set topic policy") if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or - self._compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))): + compare_delivery_policies(self.delivery_policy, json.loads(topic_attributes['DeliveryPolicy']))): changed = True self.attributes_set.append('delivery_policy') if not self.check_mode: @@ -434,22 +391,14 @@ def _set_topic_attrs(self): self.module.fail_json_aws(e, msg="Couldn't set topic delivery policy") return changed - def _canonicalize_endpoint(self, protocol, endpoint): - # AWS SNS expects phone numbers in - # and canonicalizes to E.164 format - # See - if protocol == 'sms': - return re.sub('[^0-9+]*', '', endpoint) - return endpoint - def _set_topic_subs(self): changed = False subscriptions_existing_list = set() desired_subscriptions = [(sub['protocol'], - self._canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in + canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in self.subscriptions] - for sub in self._list_topic_subscriptions(): + for sub in list_topic_subscriptions(self.connection, self.module, self.topic_arn): sub_key = (sub['Protocol'], sub['Endpoint']) subscriptions_existing_list.add(sub_key) if (self.purge_subscriptions and sub_key not in desired_subscriptions and @@ -472,23 +421,10 @@ def _set_topic_subs(self): self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn) return changed - def _list_topic_subscriptions(self): - try: - return self._list_topic_subscriptions_with_backoff() - except is_boto3_error_code('AuthorizationError'): - try: - # potentially AuthorizationError when listing subscriptions for third party topic - return [sub for sub in self._list_subscriptions_with_backoff() - if sub['TopicArn'] == self.topic_arn] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % self.topic_arn) - def _delete_subscriptions(self): # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days # https://forums.aws.amazon.com/thread.jspa?threadID=85993 - subscriptions = self._list_topic_subscriptions() + subscriptions = list_topic_subscriptions(self.connection, self.module, self.topic_arn) if not subscriptions: return False for sub in subscriptions: @@ -518,10 +454,10 @@ def ensure_ok(self): if self._name_is_arn(): self.topic_arn = self.name else: - self.topic_arn = self._topic_arn_lookup() + self.topic_arn = topic_arn_lookup(self.connection, self.module, self.name) if not self.topic_arn: changed = self._create_topic() - if self.topic_arn in self._list_topics(): + if self.topic_arn in list_topics(self.connection, self.module): changed |= self._set_topic_attrs() elif self.display_name or self.policy or self.delivery_policy: self.module.fail_json(msg="Cannot set display name, policy or delivery policy for SNS topics not owned by this account") @@ -533,37 +469,14 @@ def ensure_gone(self): if self._name_is_arn(): self.topic_arn = self.name else: - self.topic_arn = self._topic_arn_lookup() + self.topic_arn = topic_arn_lookup(self.connection, self.module, self.name) if self.topic_arn: - if self.topic_arn not in self._list_topics(): + if self.topic_arn not in 
list_topics(self.connection, self.module): self.module.fail_json(msg="Cannot use state=absent with third party ARN. Use subscribers=[] to unsubscribe") changed = self._delete_subscriptions() changed |= self._delete_topic() return changed - def get_info(self): - info = { - 'name': self.name, - 'topic_type': self.topic_type, - 'state': self.state, - 'subscriptions_new': self.subscriptions, - 'subscriptions_existing': self.subscriptions_existing, - 'subscriptions_deleted': self.subscriptions_deleted, - 'subscriptions_added': self.subscriptions_added, - 'subscriptions_purge': self.purge_subscriptions, - 'check_mode': self.check_mode, - 'topic_created': self.topic_created, - 'topic_deleted': self.topic_deleted, - 'attributes_set': self.attributes_set, - } - if self.state != 'absent': - if self.topic_arn in self._list_topics(): - info.update(camel_dict_to_snake_dict(self.connection.get_topic_attributes(TopicArn=self.topic_arn)['Attributes'])) - info['delivery_policy'] = info.pop('effective_delivery_policy') - info['subscriptions'] = [camel_dict_to_snake_dict(sub) for sub in self._list_topic_subscriptions()] - - return info - def main(): @@ -635,7 +548,7 @@ def main(): sns_facts = dict(changed=changed, sns_arn=sns_topic.topic_arn, - sns_topic=sns_topic.get_info()) + sns_topic=get_info(sns_topic.connection, module, sns_topic.topic_arn)) module.exit_json(**sns_facts) diff --git a/tests/integration/targets/sns_topic/tasks/main.yml b/tests/integration/targets/sns_topic/tasks/main.yml index 94214b20fa9..ab245490cbd 100644 --- a/tests/integration/targets/sns_topic/tasks/main.yml +++ b/tests/integration/targets/sns_topic/tasks/main.yml @@ -61,7 +61,7 @@ that: - sns_fifo_topic.changed - sns_fifo_topic.sns_topic.topic_type == 'fifo' - - sns_fifo_topic.sns_topic.name == '{{ sns_topic_topic_name }}-fifo.fifo' + - sns_fifo_topic.sns_topic.name == '{{ sns_topic_topic_name }}-fifo' - name: Run create a FIFO topic again for idempotence test sns_topic: @@ -214,7 +214,7 @@ name: '{{ sns_topic_lambda_name }}' state: present zip_file: '{{ tempdir.path }}/{{ sns_topic_lambda_function }}.zip' - runtime: python2.7 + runtime: python3.9 role: '{{ sns_topic_lambda_role }}' handler: '{{ sns_topic_lambda_function }}.handler' register: lambda_result From 0e08a409083947386482ff8423e85c4226a40dba Mon Sep 17 00:00:00 2001 From: Priyadarshini Chettiar <45838555+priyadarshu@users.noreply.github.com> Date: Thu, 3 Feb 2022 02:22:27 +0530 Subject: [PATCH 15/31] Update the name attribute value in the examples (#918) Update the name attribute value in the examples SUMMARY Problem- All the examples had same name key value irrespective of different purposes of the tasks Action taken - Made changes in the name of the tasks under examples Corrected it with relevant name key value to the comments of the task ISSUE TYPE Docs Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Joseph Torcasso Reviewed-by: Markus Bergholz --- plugins/modules/iam_managed_policy.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/modules/iam_managed_policy.py b/plugins/modules/iam_managed_policy.py index d6cdd33525e..2b33d711e71 100644 --- a/plugins/modules/iam_managed_policy.py +++ b/plugins/modules/iam_managed_policy.py @@ -65,14 +65,14 @@ state: present # Update a policy with a new default version -- name: Create IAM Managed Policy +- name: Update an IAM Managed Policy with new default version community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy: "{{ lookup('file', 'managed_policy_update.json') 
}}" state: present # Update a policy with a new non default version -- name: Create IAM Managed Policy +- name: Update an IAM Managed Policy with a non default version community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy: @@ -85,7 +85,7 @@ state: present # Update a policy and make it the only version and the default version -- name: Create IAM Managed Policy +- name: Update an IAM Managed Policy with default version as the only version community.aws.iam_managed_policy: policy_name: "ManagedPolicy" policy: | @@ -101,7 +101,7 @@ state: present # Remove a policy -- name: Create IAM Managed Policy +- name: Remove an existing IAM Managed Policy community.aws.iam_managed_policy: policy_name: "ManagedPolicy" state: absent From d47e188d91e14a1986624a6e6eac5a1eafdc2b7c Mon Sep 17 00:00:00 2001 From: Markus Bergholz Date: Thu, 3 Feb 2022 13:51:25 +0100 Subject: [PATCH 16/31] ec2_launch_template: implement missing metadata options (#917) ec2_launch_template: implement missing metadata options SUMMARY Add missing metadata options instance_metadata_tags http_protocol_ipv6 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_launch_template Reviewed-by: Markus Bergholz Reviewed-by: Mark Woolley Reviewed-by: Alina Buzachis --- ...dd-launch-template-metadata-parameters.yml | 2 + plugins/modules/ec2_launch_template.py | 38 ++++++- .../targets/ec2_launch_template/meta/main.yml | 3 + .../tasks/instance-metadata.yml | 100 ++++++++++++++---- 4 files changed, 119 insertions(+), 24 deletions(-) create mode 100644 changelogs/fragments/917-add-launch-template-metadata-parameters.yml diff --git a/changelogs/fragments/917-add-launch-template-metadata-parameters.yml b/changelogs/fragments/917-add-launch-template-metadata-parameters.yml new file mode 100644 index 00000000000..652745f48d2 --- /dev/null +++ b/changelogs/fragments/917-add-launch-template-metadata-parameters.yml @@ -0,0 +1,2 @@ +minor_changes: +- ec2_launch_template - Add metadata options parameter ``http_protocol_ipv6`` and ``instance_metadata_tags`` (https://github.com/ansible-collections/community.aws/pull/917). \ No newline at end of file diff --git a/plugins/modules/ec2_launch_template.py b/plugins/modules/ec2_launch_template.py index 1051c1b7c66..fab3c4100bd 100644 --- a/plugins/modules/ec2_launch_template.py +++ b/plugins/modules/ec2_launch_template.py @@ -353,6 +353,22 @@ The state of token usage for your instance metadata requests. choices: [optional, required] default: 'optional' + http_protocol_ipv6: + version_added: 3.1.0 + type: str + description: > + - Wether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)). + - Requires boto3 >= 1.18.29 + choices: [enabled, disabled] + default: 'disabled' + instance_metadata_tags: + version_added: 3.1.0 + type: str + description: + - Wether the instance tags are availble (C(enabled)) via metadata endpoint or not (C(disabled)). 
+ - Requires boto3 >= 1.20.30 + choices: [enabled, disabled] + default: 'disabled' ''' EXAMPLES = ''' @@ -516,6 +532,24 @@ def create_or_update(module, template_options): out = {} lt_data = params_to_launch_data(module, dict((k, v) for k, v in module.params.items() if k in template_options)) lt_data = scrub_none_parameters(lt_data, descend_into_lists=True) + + if lt_data.get('MetadataOptions'): + if not module.boto3_at_least('1.20.30'): + # fail only if enabled is requested + if lt_data['MetadataOptions'].get('InstanceMetadataTags') == 'enabled': + module.require_boto3_at_least('1.20.30', reason='to set instance_metadata_tags') + # pop if it's not requested to keep backwards compatibility. + # otherwise the modules failes because parameters are set due default values + lt_data['MetadataOptions'].pop('InstanceMetadataTags') + + if not module.boto3_at_least('1.18.29'): + # fail only if enabled is requested + if lt_data['MetadataOptions'].get('HttpProtocolIpv6') == 'enabled': + module.require_boto3_at_least('1.18.29', reason='to set http_protocol_ipv6') + # pop if it's not requested to keep backwards compatibility. + # otherwise the modules failes because parameters are set due default values + lt_data['MetadataOptions'].pop('HttpProtocolIpv6') + if not (template or template_versions): # create a full new one try: @@ -671,7 +705,9 @@ def main(): options=dict( http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'), http_put_response_hop_limit=dict(type='int', default=1), - http_tokens=dict(choices=['optional', 'required'], default='optional') + http_tokens=dict(choices=['optional', 'required'], default='optional'), + http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'), + instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'), ) ), network_interfaces=dict( diff --git a/tests/integration/targets/ec2_launch_template/meta/main.yml b/tests/integration/targets/ec2_launch_template/meta/main.yml index 38b31be0728..cc90eb5187f 100644 --- a/tests/integration/targets/ec2_launch_template/meta/main.yml +++ b/tests/integration/targets/ec2_launch_template/meta/main.yml @@ -2,3 +2,6 @@ dependencies: - prepare_tests - setup_ec2 - setup_remote_tmp_dir + - role: setup_botocore_pip + vars: + boto3_version: "1.20.30" diff --git a/tests/integration/targets/ec2_launch_template/tasks/instance-metadata.yml b/tests/integration/targets/ec2_launch_template/tasks/instance-metadata.yml index 99db5dec002..afe907f4faf 100644 --- a/tests/integration/targets/ec2_launch_template/tasks/instance-metadata.yml +++ b/tests/integration/targets/ec2_launch_template/tasks/instance-metadata.yml @@ -1,24 +1,78 @@ -- block: - - name: metadata_options - ec2_launch_template: - name: "{{ resource_prefix }}-test-metadata" - metadata_options: - http_put_response_hop_limit: 1 - http_tokens: required - state: present - register: metadata_options_launch_template - - name: instance with metadata_options created with the right options - assert: - that: - - metadata_options_launch_template is changed - - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_put_response_hop_limit == 1" - - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_tokens == 'required'" +--- +- name: test with older boto3 version that does not support instance_metadata_tags + block: + - name: fail metadata_options + ec2_launch_template: + name: "{{ resource_prefix }}-test-metadata" + metadata_options: + 
http_put_response_hop_limit: 1 + http_tokens: required + http_protocol_ipv6: enabled + instance_metadata_tags: enabled + state: present + register: metadata_options_launch_template + ignore_errors: yes + - name: verify fail with usefull error message + assert: + that: + - metadata_options_launch_template.failed + - metadata_options_launch_template is not changed + - "'This is required to set instance_metadata_tags' in metadata_options_launch_template.msg" + + - name: success metadata_options + ec2_launch_template: + name: "{{ resource_prefix }}-test-metadata" + metadata_options: + http_put_response_hop_limit: 1 + http_tokens: required + state: present + register: metadata_options_launch_template + - name: instance with metadata_options created with the right options + assert: + that: + - metadata_options_launch_template is changed + - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_put_response_hop_limit == 1" + - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_tokens == 'required'" + - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_protocol_ipv6 is not defined" + - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.instance_metadata_tags is not defined" always: - - name: delete the template - ec2_launch_template: - name: "{{ resource_prefix }}-test-metadata" - state: absent - register: del_lt - retries: 10 - until: del_lt is not failed - ignore_errors: true + - name: delete the template + ec2_launch_template: + name: "{{ resource_prefix }}-test-metadata" + state: absent + register: del_lt + retries: 10 + until: del_lt is not failed + ignore_errors: true + +- name: test with boto3 version that supports instance_metadata_tags + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + block: + - name: metadata_options + ec2_launch_template: + name: "{{ resource_prefix }}-test-metadata" + metadata_options: + http_put_response_hop_limit: 1 + http_tokens: required + http_protocol_ipv6: enabled + instance_metadata_tags: enabled + state: present + register: metadata_options_launch_template + - name: instance with metadata_options created with the right options + assert: + that: + - metadata_options_launch_template is changed + - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_put_response_hop_limit == 1" + - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_tokens == 'required'" + - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.http_protocol_ipv6 == 'enabled'" + - "metadata_options_launch_template.latest_template.launch_template_data.metadata_options.instance_metadata_tags == 'enabled'" + always: + - name: delete the template + ec2_launch_template: + name: "{{ resource_prefix }}-test-metadata" + state: absent + register: del_lt + retries: 10 + until: del_lt is not failed + ignore_errors: true From ca1d33ff5549eae5eb40aa7dfab4eb059075f70e Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Fri, 4 Feb 2022 12:03:55 +0000 Subject: [PATCH 17/31] Add AWSRetry backoff logic to route53_zone and route53_info (#865) Add AWSRetry backoff logic to route53_zone and route53_info SUMMARY Add AWSRetry backoff logic to route53_zone and route53_info. Currently from time to time I've been hitting AWS throttling errors leading to ansible failures: An exception occurred during task execution. 
To see the full traceback, use -vvv. The error was: botocore.exceptions.ClientError: An error occurred (Throttling) when calling the ListHostedZones operation (reached max retries: 4): Rate exceeded fatal: [localhost_staging -> 127.0.0.1]: FAILED! => changed=false boto3_version: 1.20.34 botocore_version: 1.23.34 error: code: Throttling message: Rate exceeded type: Sender msg: 'Could not list current hosted zones: An error occurred (Throttling) when calling the ListHostedZones operation (reached max retries: 4): Rate exceeded' response_metadata: http_headers: connection: close content-length: '255' content-type: text/xml date: Fri, 14 Jan 2022 12:09:35 GMT x-amzn-requestid: xxxxxxx http_status_code: 400 max_attempts_reached: true request_id: xxxxxxx retry_attempts: 4 ISSUE TYPE Bugfix Pull Request COMPONENT NAME route53_zone route53_info ADDITIONAL INFORMATION I've added the standard backoff retry logic and split out the paginators. Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- ...5-add-backoff-retry-logic-route53_zone.yml | 3 + plugins/modules/route53_info.py | 56 +++++++++++-------- plugins/modules/route53_zone.py | 53 ++++++++++-------- 3 files changed, 67 insertions(+), 45 deletions(-) create mode 100644 changelogs/fragments/865-add-backoff-retry-logic-route53_zone.yml diff --git a/changelogs/fragments/865-add-backoff-retry-logic-route53_zone.yml b/changelogs/fragments/865-add-backoff-retry-logic-route53_zone.yml new file mode 100644 index 00000000000..6f49a45397b --- /dev/null +++ b/changelogs/fragments/865-add-backoff-retry-logic-route53_zone.yml @@ -0,0 +1,3 @@ +bugfixes: + - Add backoff retry logic to route53_zone (https://github.com/ansible-collections/community.aws/pull/865). + - Add backoff retry logic to route53_info (https://github.com/ansible-collections/community.aws/pull/865). 
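To make the retry pattern in this patch easier to follow, here is a minimal standalone sketch of the same idea. It is not the committed code: the helper takes the client as an argument for self-containedness (the patch below uses a module-level global instead), the empty argument spec and the returned key are purely illustrative, and only the decorator usage and the retry_decorator argument to module.client() are taken from the diff.

# Minimal sketch of the backoff pattern used in this patch: a paginator helper
# is wrapped with AWSRetry so throttled ListHostedZones calls are retried, and
# the boto3 client itself also gets a retry decorator for plain API calls.
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry


@AWSRetry.jittered_backoff()
def _paginated_result(client, paginator_name, **params):
    # build_full_result() walks every page before returning the combined result
    paginator = client.get_paginator(paginator_name)
    return paginator.paginate(**params).build_full_result()


def main():
    module = AnsibleAWSModule(argument_spec=dict())
    client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff())
    zones = _paginated_result(client, 'list_hosted_zones')['HostedZones']
    module.exit_json(changed=False, hosted_zones=zones)


if __name__ == '__main__':
    main()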
diff --git a/plugins/modules/route53_info.py b/plugins/modules/route53_info.py index e2f1cd686ff..7622113c25e 100644 --- a/plugins/modules/route53_info.py +++ b/plugins/modules/route53_info.py @@ -212,9 +212,17 @@ from ansible.module_utils._text import to_native from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -def get_hosted_zone(client, module): +# Split out paginator to allow for the backoff decorator to function +@AWSRetry.jittered_backoff() +def _paginated_result(paginator_name, **params): + paginator = client.get_paginator(paginator_name) + return paginator.paginate(**params).build_full_result() + + +def get_hosted_zone(): params = dict() if module.params.get('hosted_zone_id'): @@ -225,7 +233,7 @@ def get_hosted_zone(client, module): return client.get_hosted_zone(**params) -def reusable_delegation_set_details(client, module): +def reusable_delegation_set_details(): params = dict() if not module.params.get('delegation_set_id'): @@ -246,7 +254,7 @@ def reusable_delegation_set_details(client, module): return results -def list_hosted_zones(client, module): +def list_hosted_zones(): params = dict() # Set PaginationConfig with max_items @@ -261,15 +269,15 @@ def list_hosted_zones(client, module): if module.params.get('delegation_set_id'): params['DelegationSetId'] = module.params.get('delegation_set_id') - paginator = client.get_paginator('list_hosted_zones') - zones = paginator.paginate(**params).build_full_result()['HostedZones'] + zones = _paginated_result('list_hosted_zones', **params)['HostedZones'] + return { "HostedZones": zones, "list": zones, } -def list_hosted_zones_by_name(client, module): +def list_hosted_zones_by_name(): params = dict() if module.params.get('hosted_zone_id'): @@ -287,7 +295,7 @@ def list_hosted_zones_by_name(client, module): return client.list_hosted_zones_by_name(**params) -def change_details(client, module): +def change_details(): params = dict() if module.params.get('change_id'): @@ -299,11 +307,11 @@ def change_details(client, module): return results -def checker_ip_range_details(client, module): +def checker_ip_range_details(): return client.get_checker_ip_ranges() -def get_count(client, module): +def get_count(): if module.params.get('query') == 'health_check': results = client.get_health_check_count() else: @@ -312,7 +320,7 @@ def get_count(client, module): return results -def get_health_check(client, module): +def get_health_check(): params = dict() if not module.params.get('health_check_id'): @@ -330,7 +338,7 @@ def get_health_check(client, module): return results -def get_resource_tags(client, module): +def get_resource_tags(): params = dict() if module.params.get('resource_id'): @@ -346,7 +354,7 @@ def get_resource_tags(client, module): return client.list_tags_for_resources(**params) -def list_health_checks(client, module): +def list_health_checks(): params = dict() if module.params.get('next_marker'): @@ -358,15 +366,15 @@ def list_health_checks(client, module): MaxItems=module.params.get('max_items') ) - paginator = client.get_paginator('list_health_checks') - health_checks = paginator.paginate(**params).build_full_result()['HealthChecks'] + health_checks = _paginated_result('list_health_checks', **params)['HealthChecks'] + return { "HealthChecks": health_checks, "list": health_checks, } -def record_sets_details(client, module): +def record_sets_details(): params = dict() if module.params.get('hosted_zone_id'): @@ -390,8 +398,7 @@ def 
record_sets_details(client, module): MaxItems=module.params.get('max_items') ) - paginator = client.get_paginator('list_resource_record_sets') - record_sets = paginator.paginate(**params).build_full_result()['ResourceRecordSets'] + record_sets = _paginated_result('list_resource_record_sets', **params)['ResourceRecordSets'] return { "ResourceRecordSets": record_sets, @@ -399,7 +406,7 @@ def record_sets_details(client, module): } -def health_check_details(client, module): +def health_check_details(): health_check_invocations = { 'list': list_health_checks, 'details': get_health_check, @@ -409,11 +416,11 @@ def health_check_details(client, module): 'tags': get_resource_tags, } - results = health_check_invocations[module.params.get('health_check_method')](client, module) + results = health_check_invocations[module.params.get('health_check_method')]() return results -def hosted_zone_details(client, module): +def hosted_zone_details(): hosted_zone_invocations = { 'details': get_hosted_zone, 'list': list_hosted_zones, @@ -422,11 +429,14 @@ def hosted_zone_details(client, module): 'tags': get_resource_tags, } - results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module) + results = hosted_zone_invocations[module.params.get('hosted_zone_method')]() return results def main(): + global module + global client + argument_spec = dict( query=dict(choices=[ 'change', @@ -475,7 +485,7 @@ def main(): ) try: - route53 = module.client('route53') + client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') @@ -490,7 +500,7 @@ def main(): results = dict(changed=False) try: - results = invocations[module.params.get('query')](route53, module) + results = invocations[module.params.get('query')]() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json(msg=to_native(e)) diff --git a/plugins/modules/route53_zone.py b/plugins/modules/route53_zone.py index 334e6d62718..ba51fcbb9e2 100644 --- a/plugins/modules/route53_zone.py +++ b/plugins/modules/route53_zone.py @@ -5,7 +5,7 @@ from __future__ import absolute_import, division, print_function __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' module: route53_zone short_description: add or delete Route53 zones version_added: 1.0.0 @@ -65,7 +65,7 @@ author: "Christopher Troup (@minichate)" ''' -EXAMPLES = ''' +EXAMPLES = r''' - name: create a public zone community.aws.route53_zone: zone: example.com @@ -105,7 +105,7 @@ purge_tags: true ''' -RETURN = ''' +RETURN = r''' comment: description: optional hosted zone comment returned: when hosted zone exists @@ -149,6 +149,7 @@ import time from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.community.aws.plugins.module_utils.route53 import manage_tags from ansible_collections.community.aws.plugins.module_utils.route53 import get_tags @@ -158,10 +159,15 @@ pass # caught by AnsibleAWSModule -def find_zones(module, client, zone_in, private_zone): +@AWSRetry.jittered_backoff() +def _list_zones(): + paginator = client.get_paginator('list_hosted_zones') + return paginator.paginate().build_full_result() + + +def find_zones(zone_in, private_zone): try: - paginator = client.get_paginator('list_hosted_zones') - results = paginator.paginate().build_full_result() + 
results = _list_zones() except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Could not list current hosted zones") zones = [] @@ -176,7 +182,7 @@ def find_zones(module, client, zone_in, private_zone): return zones -def create(module, client, matching_zones): +def create(matching_zones): zone_in = module.params.get('zone').lower() vpc_id = module.params.get('vpc_id') vpc_region = module.params.get('vpc_region') @@ -201,9 +207,9 @@ def create(module, client, matching_zones): } if private_zone: - changed, result = create_or_update_private(module, client, matching_zones, record) + changed, result = create_or_update_private(matching_zones, record) else: - changed, result = create_or_update_public(module, client, matching_zones, record) + changed, result = create_or_update_public(matching_zones, record) zone_id = result.get('zone_id') if zone_id: @@ -216,7 +222,7 @@ def create(module, client, matching_zones): return changed, result -def create_or_update_private(module, client, matching_zones, record): +def create_or_update_private(matching_zones, record): for z in matching_zones: try: result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids @@ -275,7 +281,7 @@ def create_or_update_private(module, client, matching_zones, record): return changed, record -def create_or_update_public(module, client, matching_zones, record): +def create_or_update_public(matching_zones, record): zone_details, zone_delegation_set_details = None, {} for matching_zone in matching_zones: try: @@ -332,7 +338,7 @@ def create_or_update_public(module, client, matching_zones, record): return changed, record -def delete_private(module, client, matching_zones, vpc_id, vpc_region): +def delete_private(matching_zones, vpc_id, vpc_region): for z in matching_zones: try: result = client.get_hosted_zone(Id=z['Id']) @@ -360,7 +366,7 @@ def delete_private(module, client, matching_zones, vpc_id, vpc_region): return False, "The vpc_id and the vpc_region do not match a private hosted zone." -def delete_public(module, client, matching_zones): +def delete_public(matching_zones): if len(matching_zones) > 1: changed = False msg = "There are multiple zones that match. Use hosted_zone_id to specify the correct zone." @@ -375,7 +381,7 @@ def delete_public(module, client, matching_zones): return changed, msg -def delete_hosted_id(module, client, hosted_zone_id, matching_zones): +def delete_hosted_id(hosted_zone_id, matching_zones): if hosted_zone_id == "all": deleted = [] for z in matching_zones: @@ -401,7 +407,7 @@ def delete_hosted_id(module, client, hosted_zone_id, matching_zones): return changed, msg -def delete(module, client, matching_zones): +def delete(matching_zones): zone_in = module.params.get('zone').lower() vpc_id = module.params.get('vpc_id') vpc_region = module.params.get('vpc_region') @@ -414,12 +420,12 @@ def delete(module, client, matching_zones): if zone_in in [z['Name'] for z in matching_zones]: if hosted_zone_id: - changed, result = delete_hosted_id(module, client, hosted_zone_id, matching_zones) + changed, result = delete_hosted_id(hosted_zone_id, matching_zones) else: if private_zone: - changed, result = delete_private(module, client, matching_zones, vpc_id, vpc_region) + changed, result = delete_private(matching_zones, vpc_id, vpc_region) else: - changed, result = delete_public(module, client, matching_zones) + changed, result = delete_public(matching_zones) else: changed = False result = "No zone to delete." 
@@ -428,6 +434,9 @@ def delete(module, client, matching_zones): def main(): + global module + global client + argument_spec = dict( zone=dict(required=True), state=dict(default='present', choices=['present', 'absent']), @@ -461,13 +470,13 @@ def main(): private_zone = bool(vpc_id and vpc_region) - client = module.client('route53') + client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) - zones = find_zones(module, client, zone_in, private_zone) + zones = find_zones(zone_in, private_zone) if state == 'present': - changed, result = create(module, client, matching_zones=zones) + changed, result = create(matching_zones=zones) elif state == 'absent': - changed, result = delete(module, client, matching_zones=zones) + changed, result = delete(matching_zones=zones) if isinstance(result, dict): module.exit_json(changed=changed, result=result, **result) From 55962ff21023ffdc6ecc1f985503c63852b3b19b Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Fri, 4 Feb 2022 16:08:05 +0000 Subject: [PATCH 18/31] Add deregistration_connection_termination to elb_target_group (#913) Add deregistration_connection_termination to elb_target_group SUMMARY Adding support for the deregistration_connection_termination param in the elb_target_group module. Along with this I've enabled and fixed up the integration tests. ISSUE TYPE Feature Pull Request COMPONENT NAME elb_target_group ADDITIONAL INFORMATION The API param is deregistration_delay.connection_termination.enabled https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_group_attributes Reviewed-by: Mark Woolley Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- .../fragments/913-tg-dereg-conn-param.yml | 2 + plugins/modules/elb_target_group.py | 22 +- tests/integration/targets/elb_target/aliases | 4 +- .../targets/elb_target/defaults/main.yml | 5 +- .../targets/elb_target/meta/main.yml | 3 + .../targets/elb_target/tasks/ec2_target.yml | 205 ++++++------------ .../elb_target/tasks/lambda_target.yml | 47 ++-- .../targets/elb_target/tasks/main.yml | 5 +- 8 files changed, 128 insertions(+), 165 deletions(-) create mode 100644 changelogs/fragments/913-tg-dereg-conn-param.yml create mode 100644 tests/integration/targets/elb_target/meta/main.yml diff --git a/changelogs/fragments/913-tg-dereg-conn-param.yml b/changelogs/fragments/913-tg-dereg-conn-param.yml new file mode 100644 index 00000000000..d4526ebd703 --- /dev/null +++ b/changelogs/fragments/913-tg-dereg-conn-param.yml @@ -0,0 +1,2 @@ +minor_changes: + - elb_target_group - add support for parameter ``deregistration_connection_termination`` (https://github.com/ansible-collections/community.aws/pull/913). diff --git a/plugins/modules/elb_target_group.py b/plugins/modules/elb_target_group.py index 9a740422293..20e9c2b19da 100644 --- a/plugins/modules/elb_target_group.py +++ b/plugins/modules/elb_target_group.py @@ -22,6 +22,13 @@ - The amount time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. type: int + deregistration_connection_termination: + description: + - Indicates whether the load balancer terminates connections at the end of the deregistration timeout. + type: bool + default: false + required: false + version_added: 3.1.0 health_check_protocol: description: - The protocol the load balancer uses when performing health checks on targets. 
@@ -305,6 +312,11 @@ returned: when state present type: int sample: 300 +deregistration_connection_termination: + description: Indicates whether the load balancer terminates connections at the end of the deregistration timeout. + returned: when state present + type: bool + sample: True health_check_interval_seconds: description: The approximate amount of time, in seconds, between health checks of an individual target. returned: when state present @@ -425,7 +437,7 @@ def get_tg_attributes(connection, module, tg_arn): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get target group attributes") - # Replace '.' with '_' in attribute key names to make it more Ansibley + # Replace '.' with '_' in attribute key names to make it more Ansible friendly return dict((k.replace('.', '_'), v) for k, v in tg_attributes.items()) @@ -486,6 +498,7 @@ def create_or_update_target_group(connection, module): tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") deregistration_delay_timeout = module.params.get("deregistration_delay_timeout") + deregistration_connection_termination = module.params.get("deregistration_connection_termination") stickiness_enabled = module.params.get("stickiness_enabled") stickiness_lb_cookie_duration = module.params.get("stickiness_lb_cookie_duration") stickiness_type = module.params.get("stickiness_type") @@ -767,6 +780,9 @@ def create_or_update_target_group(connection, module): if deregistration_delay_timeout is not None: if str(deregistration_delay_timeout) != current_tg_attributes['deregistration_delay_timeout_seconds']: update_attributes.append({'Key': 'deregistration_delay.timeout_seconds', 'Value': str(deregistration_delay_timeout)}) + if deregistration_connection_termination is not None: + if deregistration_connection_termination and current_tg_attributes.get('deregistration_delay_connection_termination_enabled') != "true": + update_attributes.append({'Key': 'deregistration_delay.connection_termination.enabled', 'Value': 'true'}) if stickiness_enabled is not None: if stickiness_enabled and current_tg_attributes['stickiness_enabled'] != "true": update_attributes.append({'Key': 'stickiness.enabled', 'Value': 'true'}) @@ -855,6 +871,7 @@ def main(): 'HTTPS', 'TCP', 'TLS', 'UDP', 'TCP_UDP'] argument_spec = dict( deregistration_delay_timeout=dict(type='int'), + deregistration_connection_termination=dict(type='bool', default=False), health_check_protocol=dict(choices=protocols_list), health_check_port=dict(), health_check_path=dict(), @@ -897,6 +914,9 @@ def main(): connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) if module.params.get('state') == 'present': + if module.params.get('protocol') in ['http', 'https', 'HTTP', 'HTTPS'] and module.params.get('deregistration_connection_termination', None): + module.fail_json(msg="A target group with HTTP/S protocol does not support setting deregistration_connection_termination") + create_or_update_target_group(connection, module) else: delete_target_group(connection, module) diff --git a/tests/integration/targets/elb_target/aliases b/tests/integration/targets/elb_target/aliases index 36af861d8b9..5d10f812415 100644 --- a/tests/integration/targets/elb_target/aliases +++ b/tests/integration/targets/elb_target/aliases @@ -1,6 +1,4 @@ cloud/aws -# currently broken -# e.g: 
https://3d7660cef77b937e1585-998cb574f2547d50f5110d6a2d4ac097.ssl.cf1.rackcdn.com/636/067f6f84c20701ccf4bf0654471613af598c6e89/check/ansible-test-cloud-integration-aws-py36_2/be6c4b3/job-output.txt -disabled + slow elb_target_group diff --git a/tests/integration/targets/elb_target/defaults/main.yml b/tests/integration/targets/elb_target/defaults/main.yml index 14068f1e5c0..88f68a0bf43 100644 --- a/tests/integration/targets/elb_target/defaults/main.yml +++ b/tests/integration/targets/elb_target/defaults/main.yml @@ -4,12 +4,13 @@ unique_id: "ansible-test-{{ tiny_prefix }}" lambda_role_name: '{{ unique_id }}-elb-target' lambda_name: '{{ unique_id }}-elb-target' -elb_target_group_name: "{{ unique_id }}-elb-tg" +elb_target_group_name: "{{ unique_id }}-elb" # Defaults used by the EC2 based test ec2_ami_name: 'amzn2-ami-hvm-2.0.20190612-x86_64-gp2' tg_name: "{{ unique_id }}-tg" -tg_tcpudp_name: "{{ unique_id }}-tgtcpudp" +tg_used_name: "{{ unique_id }}-tgu" +tg_tcpudp_name: "{{ unique_id }}-udp" lb_name: "{{ unique_id }}-lb" healthy_state: state: 'healthy' diff --git a/tests/integration/targets/elb_target/meta/main.yml b/tests/integration/targets/elb_target/meta/main.yml new file mode 100644 index 00000000000..e9ce9b3e3ed --- /dev/null +++ b/tests/integration/targets/elb_target/meta/main.yml @@ -0,0 +1,3 @@ +dependencies: + - prepare_tests + - setup_ec2 \ No newline at end of file diff --git a/tests/integration/targets/elb_target/tasks/ec2_target.yml b/tests/integration/targets/elb_target/tasks/ec2_target.yml index f350672cafe..da85c705c63 100644 --- a/tests/integration/targets/elb_target/tasks/ec2_target.yml +++ b/tests/integration/targets/elb_target/tasks/ec2_target.yml @@ -1,7 +1,6 @@ --- - - name: set up ec2 based test prerequisites - block: - +- name: set up ec2 based test prerequisites + block: # ============================================================ - name: @@ -10,12 +9,12 @@ # ============================================================ - name: Find AMI to use ec2_ami_info: - owners: 'amazon' + owners: "amazon" filters: - name: '{{ ec2_ami_name }}' + name: "{{ ec2_ami_name }}" register: ec2_amis - set_fact: - ec2_ami_image: '{{ ec2_amis.images[0].image_id }}' + ec2_ami_image: "{{ ec2_amis.images[0].image_id }}" - name: set up testing VPC ec2_vpc_net: @@ -88,7 +87,7 @@ health_check_port: 80 protocol: http port: 80 - vpc_id: '{{ vpc.vpc.id }}' + vpc_id: "{{ vpc.vpc.id }}" state: present target_type: instance tags: @@ -99,7 +98,7 @@ name: "{{ tg_tcpudp_name }}" protocol: udp port: 53 - vpc_id: '{{ vpc.vpc.id }}' + vpc_id: "{{ vpc.vpc.id }}" state: present target_type: instance tags: @@ -108,15 +107,27 @@ - name: set up testing target group for ALB (type=instance) elb_target_group: - name: "{{ tg_name }}-used" + name: "{{ tg_used_name }}" health_check_port: 80 protocol: http port: 80 - vpc_id: '{{ vpc.vpc.id }}' + vpc_id: "{{ vpc.vpc.id }}" state: present target_type: instance tags: Description: "Created by {{ resource_prefix }}" + register: result + + - name: set up testing target group for ALB (type=instance) + assert: + that: + - result.changed + - result.target_group_name == tg_used_name + - result.target_type == 'instance' + - result.vpc_id == vpc.vpc.id + - result.port == 80 + - '"health_check_port" in result' + - '"tags" in result' - name: set up testing target group for NLB (type=instance) elb_target_group: @@ -124,9 +135,11 @@ health_check_port: 80 protocol: tcp port: 80 - vpc_id: '{{ vpc.vpc.id }}' + vpc_id: "{{ vpc.vpc.id }}" state: present target_type: instance + 
deregistration_delay_timeout: 60 + deregistration_connection_termination: yes tags: Description: "Created by {{ resource_prefix }}" register: result @@ -143,7 +156,9 @@ - '"target_group_arn" in result' - result.target_group_name == "{{ tg_name }}-nlb" - result.target_type == 'instance' - - result.vpc_id == '{{ vpc.vpc.id }}' + - result.deregistration_delay_timeout_seconds == '60' + - result.deregistration_delay_connection_termination_enabled + - result.vpc_id == vpc.vpc.id - name: set up ec2 instance to use as a target ec2_instance: @@ -157,6 +172,8 @@ volumes: [] wait: true ebs_optimized: false + tags: + Name: "{{ resource_prefix }}-inst" user_data: | #cloud-config package_upgrade: true @@ -184,7 +201,7 @@ Port: 80 DefaultActions: - Type: forward - TargetGroupName: "{{ tg_name }}-used" + TargetGroupName: "{{ tg_used_name }}" state: present - name: create a network load balancer @@ -194,9 +211,9 @@ - "{{ subnet_1.subnet.id }}" - "{{ subnet_2.subnet.id }}" listeners: - - Protocol: TCP - Port: 80 - DefaultActions: + - Protocol: TCP + Port: 80 + DefaultActions: - Type: forward TargetGroupName: "{{ tg_name }}-nlb" state: present @@ -218,7 +235,7 @@ health_check_port: 80 protocol: tcp port: 80 - vpc_id: '{{ vpc.vpc.id }}' + vpc_id: "{{ vpc.vpc.id }}" state: present target_type: instance modify_targets: true @@ -240,7 +257,7 @@ health_check_port: 80 protocol: tcp port: 80 - vpc_id: '{{ vpc.vpc.id }}' + vpc_id: "{{ vpc.vpc.id }}" state: present target_type: instance modify_targets: true @@ -262,7 +279,7 @@ health_check_port: 80 protocol: tcp port: 80 - vpc_id: '{{ vpc.vpc.id }}' + vpc_id: "{{ vpc.vpc.id }}" state: present target_type: instance modify_targets: true @@ -334,7 +351,7 @@ - name: register an instance to used target group and wait until healthy elb_target: - target_group_name: "{{ tg_name }}-used" + target_group_name: "{{ tg_used_name }}" target_id: "{{ instance_id }}" state: present target_status: healthy @@ -353,7 +370,7 @@ - name: remove a target from used target group elb_target: - target_group_name: "{{ tg_name }}-used" + target_group_name: "{{ tg_used_name }}" target_id: "{{ instance_id }}" state: absent target_status: unused @@ -369,7 +386,7 @@ - name: test idempotence elb_target: - target_group_name: "{{ tg_name }}-used" + target_group_name: "{{ tg_used_name }}" target_id: "{{ instance_id }}" state: absent register: result @@ -383,7 +400,7 @@ - name: register an instance to used target group and wait until healthy again to test deregistering differently elb_target: - target_group_name: "{{ tg_name }}-used" + target_group_name: "{{ tg_used_name }}" target_id: "{{ instance_id }}" state: present target_status: healthy @@ -400,7 +417,7 @@ - name: start deregisteration but don't wait elb_target: - target_group_name: "{{ tg_name }}-used" + target_group_name: "{{ tg_used_name }}" target_id: "{{ instance_id }}" state: absent register: result @@ -413,7 +430,7 @@ - name: now wait for target to finish deregistering elb_target: - target_group_name: "{{ tg_name }}-used" + target_group_name: "{{ tg_used_name }}" target_id: "{{ instance_id }}" state: absent target_status: unused @@ -426,98 +443,29 @@ - not result.changed - not result.target_health_descriptions - # ============================================================ - - always: + # ============================================================ + always: - name: debug: msg="********** Tearing down elb_target test dependencies **********" - name: remove ec2 instance ec2_instance: - name: "{{ resource_prefix }}-inst" + instance_ids: + - 
"{{ instance_id }}" state: absent wait: True - ignore_errors: true - - - name: remove testing target groups - elb_target_group: - name: "{{ item }}" - health_check_port: 80 - protocol: http - port: 80 - vpc_id: '{{ vpc.vpc.id }}' - state: absent - target_type: instance - tags: - Description: "Created by {{ resource_prefix }}" - wait: true - wait_timeout: 400 - register: removed - retries: 10 - until: removed is not failed - with_items: - - "{{ tg_name }}" - - "{{ tg_name }}-used" - ignore_errors: true - - - name: remove udp testing target groups - elb_target_group: - name: "{{ item }}" - protocol: udp - port: 53 - vpc_id: '{{ vpc.vpc.id }}' - state: absent - target_type: instance - tags: - Description: "Created by {{ resource_prefix }}" - Protocol: "UDP" - wait: true - wait_timeout: 400 - register: removed - retries: 10 - until: removed is not failed - with_items: - - "{{ tg_tcpudp_name }}" - ignore_errors: true - - - name: remove tcp testing target groups - elb_target_group: - name: "{{ item }}" - protocol: tcp - port: 80 - vpc_id: '{{ vpc.vpc.id }}' - state: absent - target_type: instance - tags: - Description: "Created by {{ resource_prefix }}" - Protocol: "UDP" - wait: true - wait_timeout: 400 register: removed retries: 10 until: removed is not failed - with_items: - - "{{ tg_name }}-nlb" ignore_errors: true - name: remove application load balancer elb_application_lb: name: "{{ lb_name }}" - security_groups: - - "{{ sg.group_id }}" - subnets: - - "{{ subnet_1.subnet.id }}" - - "{{ subnet_2.subnet.id }}" - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}-used" state: absent wait: true - wait_timeout: 400 + wait_timeout: 600 register: removed retries: 10 until: removed is not failed @@ -526,41 +474,28 @@ - name: remove network load balancer elb_network_lb: name: "{{ lb_name }}-nlb" - subnets: - - "{{ subnet_1.subnet.id }}" - - "{{ subnet_2.subnet.id }}" - listeners: - - Protocol: TCP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}-nlb" state: absent wait: true - wait_timeout: 400 + wait_timeout: 600 register: removed retries: 10 until: removed is not failed ignore_errors: true - - name: remove testing security group - ec2_group: + - name: remove testing target groups + elb_target_group: + name: "{{ item }}" state: absent - name: "{{ resource_prefix }}-sg" - description: a security group for ansible tests - vpc_id: "{{ vpc.vpc.id }}" - rules: - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 + wait: true + wait_timeout: 600 register: removed retries: 10 until: removed is not failed + with_items: + - "{{ tg_name }}" + - "{{ tg_used_name }}" + - "{{ tg_tcpudp_name }}" + - "{{ tg_name }}-nlb" ignore_errors: true - name: remove routing rules @@ -579,10 +514,8 @@ vpc_id: "{{ vpc.vpc.id }}" cidr: 20.0.0.0/18 az: "{{ aws_region }}a" - resource_tags: - Name: "{{ resource_prefix }}-subnet" register: removed - retries: 10 + retries: 15 until: removed is not failed ignore_errors: true @@ -592,10 +525,17 @@ vpc_id: "{{ vpc.vpc.id }}" cidr: 20.0.64.0/18 az: "{{ aws_region }}b" - resource_tags: - Name: "{{ resource_prefix }}-subnet" register: removed - retries: 10 + retries: 15 + until: removed is not failed + ignore_errors: true + + - name: remove testing security group + ec2_group: + state: absent + name: "{{ resource_prefix }}-sg" + register: removed + retries: 15 until: removed is not failed ignore_errors: true @@ -611,14 
+551,11 @@ - name: remove testing VPC ec2_vpc_net: name: "{{ resource_prefix }}-vpc" - state: absent cidr_block: 20.0.0.0/16 - tags: - Name: "{{ resource_prefix }}-vpc" - Description: "Created by ansible-test" + state: absent register: removed retries: 10 until: removed is not failed ignore_errors: true - # ============================================================ + # ============================================================ diff --git a/tests/integration/targets/elb_target/tasks/lambda_target.yml b/tests/integration/targets/elb_target/tasks/lambda_target.yml index 8b7955ddbe3..f43c490bf5b 100644 --- a/tests/integration/targets/elb_target/tasks/lambda_target.yml +++ b/tests/integration/targets/elb_target/tasks/lambda_target.yml @@ -1,29 +1,30 @@ - name: set up lambda as elb_target block: - - name: create zip to deploy lambda code archive: - path: '{{ role_path }}/files/ansible_lambda_target.py' - dest: /tmp/lambda.zip format: zip + path: "{{ role_path }}/files/ansible_lambda_target.py" + dest: "/tmp/lambda.zip" + - name: create or update service-role for lambda iam_role: - name: '{{ lambda_role_name }}' + name: "{{ lambda_role_name }}" assume_role_policy_document: '{{ lookup("file", role_path + "/files/assume-role.json") }}' managed_policy: - - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess' + - "arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess" register: ROLE_ARN + - name: when it is too fast, the role is not usable. pause: seconds: 10 - name: deploy lambda.zip to ansible_lambda_target function lambda: - name: '{{ lambda_name }}' + name: "{{ lambda_name }}" state: present zip_file: /tmp/lambda.zip runtime: python3.7 - role: '{{ ROLE_ARN.arn }}' + role: "{{ ROLE_ARN.arn }}" handler: ansible_lambda_target.lambda_handler timeout: 30 register: lambda_function @@ -33,7 +34,7 @@ - name: create empty target group elb_target_group: - name: '{{ elb_target_group_name }}' + name: "{{ elb_target_group_name }}" target_type: lambda state: present modify_targets: false @@ -42,49 +43,49 @@ - name: tg is created, state must be changed assert: that: - - elb_target_group.changed + - elb_target_group.changed - name: allow elb to invoke the lambda function lambda_policy: state: present - function_name: '{{ lambda_name }}' - version: '{{ lambda_function.configuration.version }}' + function_name: "{{ lambda_name }}" + version: "{{ lambda_function.configuration.version }}" statement_id: elb1 action: lambda:InvokeFunction principal: elasticloadbalancing.amazonaws.com - source_arn: '{{ elb_target_group.target_group_arn }}' + source_arn: "{{ elb_target_group.target_group_arn }}" - name: add lambda to elb target elb_target_group: - name: '{{ elb_target_group_name }}' + name: "{{ elb_target_group_name }}" target_type: lambda state: present targets: - - Id: '{{ lambda_function.configuration.function_arn }}' + - Id: "{{ lambda_function.configuration.function_arn }}" register: elb_target_group - name: target is updated, state must be changed assert: that: - - elb_target_group.changed + - elb_target_group.changed - name: re-add lambda to elb target (idempotency) elb_target_group: - name: '{{ elb_target_group_name }}' + name: "{{ elb_target_group_name }}" target_type: lambda state: present targets: - - Id: '{{ lambda_function.configuration.function_arn }}' + - Id: "{{ lambda_function.configuration.function_arn }}" register: elb_target_group - name: target is still the same, state must not be changed (idempotency) assert: that: - - not elb_target_group.changed + - not elb_target_group.changed - name: remove lambda 
target from target group elb_target_group: - name: '{{ elb_target_group_name }}' + name: "{{ elb_target_group_name }}" target_type: lambda state: absent targets: [] @@ -93,24 +94,24 @@ - name: target is still the same, state must not be changed (idempotency) assert: that: - - elb_target_group.changed + - elb_target_group.changed always: - name: remove elb target group elb_target_group: - name: '{{ elb_target_group_name }}' + name: "{{ elb_target_group_name }}" target_type: lambda state: absent ignore_errors: true - name: remove lambda function lambda: - name: '{{ lambda_name }}' + name: "{{ lambda_name }}" state: absent ignore_errors: true - name: remove iam role for lambda iam_role: - name: '{{ lambda_role_name }}' + name: "{{ lambda_role_name }}" state: absent ignore_errors: true diff --git a/tests/integration/targets/elb_target/tasks/main.yml b/tests/integration/targets/elb_target/tasks/main.yml index e6c62f922d3..7627fc83219 100644 --- a/tests/integration/targets/elb_target/tasks/main.yml +++ b/tests/integration/targets/elb_target/tasks/main.yml @@ -7,7 +7,8 @@ security_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: + - community.general - amazon.aws block: - - include_tasks: lambda_target.yml - - include_tasks: ec2_target.yml + - include_tasks: ec2_target.yml + - include_tasks: lambda_target.yml From 45e79ed2e8a60d87b20f77cfbef4ba8a27da1260 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Fri, 4 Feb 2022 16:10:43 +0000 Subject: [PATCH 19/31] Fix IOPs io1 DB instance updates and integration tests also (#878) Fix IOPs io1 DB instance updates and integration tests also SUMMARY Primary this PR is to fix updates when updating iops or allocated_storage on io1 DB instances when only one param is changing. Secondarily this fixes up the tests again and is test against some improvements to the waiter configuration see linked PR. IOPs error on update attempts if only one param is being updated: error: code: InvalidParameterCombination message: You must specify both the storage size and iops when modifying the storage size or iops on a DB instance that has iops. type: Sender msg: 'Unable to modify DB instance: An error occurred (InvalidParameterCombination) when calling the ModifyDBInstance operation: You must specify both the storage size and iops when modifying the storage size or iops on a DB instance that has iops.' ISSUE TYPE Bugfix Pull Request COMPONENT NAME rds_instance ADDITIONAL INFORMATION These tests are very slow and still a little flakey but generally all pass as expected now locally. 
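The core of the io1 fix can be summarized as a small sketch of the bundling rule, written here as a free-standing helper for illustration; the function name and its standalone form are assumptions, and the real change lives inside get_options_with_changing_values() in the rds_instance diff below.

# Sketch of the rule this patch enforces for io1 storage: if either IOPS or
# allocated storage is changing, ModifyDBInstance must receive both values,
# otherwise AWS rejects the request with InvalidParameterCombination.
def bundle_io1_modifications(instance, new_iops, new_allocated_storage):
    pending = instance.get('PendingModifiedValues', {})
    current_iops = pending.get('Iops', instance['Iops'])
    current_storage = pending.get('AllocatedStorage', instance['AllocatedStorage'])

    params = {}
    if current_iops != new_iops or current_storage != new_allocated_storage:
        # send both values together, even if only one of them changed
        params['Iops'] = new_iops
        params['AllocatedStorage'] = new_allocated_storage
    return params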
Reviewed-by: Mark Woolley Reviewed-by: Markus Bergholz Reviewed-by: Alina Buzachis --- .../fragments/878-fix-iops-updates-rds.yml | 2 + plugins/modules/rds_instance.py | 19 ++++- .../integration/targets/rds_instance/aliases | 1 - .../targets/rds_instance/inventory | 17 ++-- .../integration/targets/rds_instance/main.yml | 2 +- .../roles/rds_instance/defaults/main.yml | 6 +- ...st_modify_complex.yml => test_complex.yml} | 30 +++---- ...cessor_features.yml => test_processor.yml} | 0 ...test_read_replica.yml => test_replica.yml} | 0 ..._restore_instance.yml => test_restore.yml} | 0 ...c_security_groups.yml => test_sgroups.yml} | 1 - .../roles/rds_instance/tasks/test_states.yml | 30 ------- .../roles/rds_instance/tasks/test_tagging.yml | 30 +++++++ .../roles/rds_instance/tasks/test_upgrade.yml | 82 +++++++++++++++++++ 14 files changed, 165 insertions(+), 55 deletions(-) create mode 100644 changelogs/fragments/878-fix-iops-updates-rds.yml rename tests/integration/targets/rds_instance/roles/rds_instance/tasks/{test_modify_complex.yml => test_complex.yml} (82%) rename tests/integration/targets/rds_instance/roles/rds_instance/tasks/{test_processor_features.yml => test_processor.yml} (100%) rename tests/integration/targets/rds_instance/roles/rds_instance/tasks/{test_read_replica.yml => test_replica.yml} (100%) rename tests/integration/targets/rds_instance/roles/rds_instance/tasks/{test_restore_instance.yml => test_restore.yml} (100%) rename tests/integration/targets/rds_instance/roles/rds_instance/tasks/{test_vpc_security_groups.yml => test_sgroups.yml} (98%) create mode 100644 tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_upgrade.yml diff --git a/changelogs/fragments/878-fix-iops-updates-rds.yml b/changelogs/fragments/878-fix-iops-updates-rds.yml new file mode 100644 index 00000000000..cb4b22f47ab --- /dev/null +++ b/changelogs/fragments/878-fix-iops-updates-rds.yml @@ -0,0 +1,2 @@ +bugfixes: + - rds_instance - Fix updates of ``iops`` or ``allocated_storage`` for ``io1`` DB instances when only one value is changing (https://github.com/ansible-collections/community.aws/pull/878). diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index 92d5e257cf0..742a7266c5e 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -467,10 +467,15 @@ RETURN = r''' allocated_storage: - description: The allocated storage size in gibibytes. This is always 1 for aurora database engines. + description: The allocated storage size in gigabytes. This is always 1 for aurora database engines. returned: always type: int sample: 20 +associated_roles: + description: The list of currently associated roles. + returned: always + type: list + sample: [] auto_minor_version_upgrade: description: Whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. 
returned: always @@ -890,6 +895,17 @@ def get_options_with_changing_values(client, module, parameters): updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance)) parameters = updated_parameters + if instance.get('StorageType') == 'io1': + # Bundle Iops and AllocatedStorage while updating io1 RDS Instance + current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops']) + current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage']) + new_iops = module.params.get('iops') + new_allocated_storage = module.params.get('allocated_storage') + + if current_iops != new_iops or current_allocated_storage != new_allocated_storage: + parameters['AllocatedStorage'] = new_allocated_storage + parameters['Iops'] = new_iops + if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'): if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately: parameters.pop('NewDBInstanceIdentifier') @@ -1179,6 +1195,7 @@ def main(): ('engine', 'aurora', ('db_cluster_identifier',)), ('engine', 'aurora-mysql', ('db_cluster_identifier',)), ('engine', 'aurora-postresql', ('db_cluster_identifier',)), + ('storage_type', 'io1', ('iops', 'allocated_storage')), ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')), ('creation_source', 's3', ( 's3_bucket_name', 'engine', 'master_username', 'master_user_password', diff --git a/tests/integration/targets/rds_instance/aliases b/tests/integration/targets/rds_instance/aliases index 67aa5c052e2..e30a1801b1e 100644 --- a/tests/integration/targets/rds_instance/aliases +++ b/tests/integration/targets/rds_instance/aliases @@ -1,4 +1,3 @@ -disabled # something is broken slow cloud/aws diff --git a/tests/integration/targets/rds_instance/inventory b/tests/integration/targets/rds_instance/inventory index 9daf5db1e07..e0443d829d6 100644 --- a/tests/integration/targets/rds_instance/inventory +++ b/tests/integration/targets/rds_instance/inventory @@ -1,12 +1,19 @@ +# inventory names shortened down to fit resource name length limits [tests] +# processor feature tests +processor +# restore instance tests +restore +# security groups db tests +sgroups +# modify complex tests +complex +# other tests states modify -modify_complex -processor_features -read_replica -vpc_security_groups -restore_instance tagging +replica +upgrade # TODO: uncomment after adding rds_cluster module # aurora diff --git a/tests/integration/targets/rds_instance/main.yml b/tests/integration/targets/rds_instance/main.yml index 1b33dab5076..7d0dd4f8990 100644 --- a/tests/integration/targets/rds_instance/main.yml +++ b/tests/integration/targets/rds_instance/main.yml @@ -6,6 +6,6 @@ - hosts: all gather_facts: no strategy: free - serial: 8 + serial: 9 roles: - rds_instance diff --git a/tests/integration/targets/rds_instance/roles/rds_instance/defaults/main.yml b/tests/integration/targets/rds_instance/roles/rds_instance/defaults/main.yml index 35d5dd7bdac..10f39d1ee22 100644 --- a/tests/integration/targets/rds_instance/roles/rds_instance/defaults/main.yml +++ b/tests/integration/targets/rds_instance/roles/rds_instance/defaults/main.yml @@ -1,5 +1,5 @@ --- -instance_id: "ansible-test-{{ tiny_prefix }}" +instance_id: "ansible-test-{{ inventory_hostname | replace('_','-') }}{{ tiny_prefix }}" modified_instance_id: "{{ instance_id }}-updated" username: test password: test12345678 @@ 
-8,8 +8,12 @@ storage_encrypted_db_instance_class: db.t3.small modified_db_instance_class: db.t3.medium allocated_storage: 20 modified_allocated_storage: 30 +io1_allocated_storage: 100 +io1_modified_allocated_storage: 110 monitoring_interval: 60 preferred_maintenance_window: "mon:06:20-mon:06:50" +storage_type: io1 +iops: 1000 # For aurora tests cluster_id: "{{ instance_id }}-cluster" diff --git a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_modify_complex.yml b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_complex.yml similarity index 82% rename from tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_modify_complex.yml rename to tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_complex.yml index 066d35c11d9..c9d8b3a4c5f 100644 --- a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_modify_complex.yml +++ b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_complex.yml @@ -31,7 +31,9 @@ username: "{{ username }}" password: "{{ password }}" db_instance_class: "{{ db_instance_class }}" - allocated_storage: "{{ allocated_storage }}" + allocated_storage: "{{ io1_allocated_storage }}" + storage_type: "{{ storage_type }}" + iops: "{{ iops }}" register: result - assert: @@ -48,47 +50,46 @@ rds_instance: id: "{{ instance_id }}" state: present - allocated_storage: 30 + allocated_storage: "{{ io1_modified_allocated_storage }}" + storage_type: "{{ storage_type }}" db_instance_class: "{{ modified_db_instance_class }}" backup_retention_period: 2 preferred_backup_window: "05:00-06:00" preferred_maintenance_window: "{{ preferred_maintenance_window }}" - engine_version: "{{ mariadb_engine_version_2 }}" - allow_major_version_upgrade: true auto_minor_version_upgrade: false monitoring_interval: "{{ monitoring_interval }}" monitoring_role_arn: "{{ enhanced_monitoring_role.arn }}" + iops: "{{ iops }}" port: 1150 - max_allocated_storage: 100 + max_allocated_storage: 150 apply_immediately: True register: result - assert: that: - result.changed - - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == 30' - - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 100' + - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == io1_modified_allocated_storage' + - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 150' - '"port" in result.pending_modified_values or result.endpoint.port == 1150' - '"db_instance_class" in result.pending_modified_values or result.db_instance_class == modified_db_instance_class' - - '"engine_version" in result.pending_modified_values or result.engine_version == mariadb_engine_version_2' - '"monitoring_interval" in result.pending_modified_values or result.monitoring_interval == monitoring_interval' - name: Idempotence modifying several pending attributes rds_instance: id: "{{ instance_id }}" state: present - allocated_storage: 30 + allocated_storage: "{{ io1_modified_allocated_storage }}" + storage_type: "{{ storage_type }}" db_instance_class: "{{ modified_db_instance_class }}" backup_retention_period: 2 preferred_backup_window: "05:00-06:00" preferred_maintenance_window: "{{ preferred_maintenance_window }}" - engine_version: "{{ mariadb_engine_version_2 }}" - allow_major_version_upgrade: true auto_minor_version_upgrade: false monitoring_interval: "{{ monitoring_interval }}" monitoring_role_arn: "{{ 
enhanced_monitoring_role.arn }}" + iops: "{{ iops }}" port: 1150 - max_allocated_storage: 100 + max_allocated_storage: 150 register: result retries: 30 delay: 10 @@ -97,11 +98,10 @@ - assert: that: - not result.changed - - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == 30' - - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 100' + - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == io1_modified_allocated_storage' + - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 150' - '"port" in result.pending_modified_values or result.endpoint.port == 1150' - '"db_instance_class" in result.pending_modified_values or result.db_instance_class == modified_db_instance_class' - - '"engine_version" in result.pending_modified_values or result.engine_version == mariadb_engine_version_2' always: - name: Delete the instance diff --git a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_processor_features.yml b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_processor.yml similarity index 100% rename from tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_processor_features.yml rename to tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_processor.yml diff --git a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_read_replica.yml b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_replica.yml similarity index 100% rename from tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_read_replica.yml rename to tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_replica.yml diff --git a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_restore_instance.yml b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_restore.yml similarity index 100% rename from tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_restore_instance.yml rename to tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_restore.yml diff --git a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_vpc_security_groups.yml b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_sgroups.yml similarity index 98% rename from tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_vpc_security_groups.yml rename to tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_sgroups.yml index d797d965323..82e63578554 100644 --- a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_vpc_security_groups.yml +++ b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_sgroups.yml @@ -28,7 +28,6 @@ - {"cidr": "10.122.122.128/28", "zone": "{{ aws_region }}a"} - {"cidr": "10.122.122.144/28", "zone": "{{ aws_region }}b"} - {"cidr": "10.122.122.160/28", "zone": "{{ aws_region }}c"} - - {"cidr": "10.122.122.176/28", "zone": "{{ aws_region }}d"} - name: Create security groups ec2_group: diff --git a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_states.yml b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_states.yml index dd13d55e164..c0d36b85943 100644 --- a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_states.yml +++ b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_states.yml @@ -119,37 +119,7 @@ that: - result.changed - # 
Test final snapshot on deletion - - name: Delete the DB instance - rds_instance: - id: "{{ instance_id }}" - state: absent - final_snapshot_identifier: "{{ instance_id }}" - register: result - - - assert: - that: - - result.changed - - "result.final_snapshot.db_instance_identifier == '{{ instance_id }}'" - - - name: Check that snapshot exists - rds_snapshot_info: - db_snapshot_identifier: "{{ instance_id }}" - register: result - - - assert: - that: - - "result.snapshots | length == 1" - - "result.snapshots.0.engine == 'mariadb'" - always: - - name: remove snapshot - rds_instance_snapshot: - db_snapshot_identifier: "{{ resource_prefix }}-test-snapshot" - state: absent - wait: false - ignore_errors: yes - - name: Remove DB instance rds_instance: id: "{{ instance_id }}" diff --git a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_tagging.yml b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_tagging.yml index 0cdd9c1b7a5..bb84a63d95d 100644 --- a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_tagging.yml +++ b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_tagging.yml @@ -119,7 +119,37 @@ - "result.tags | length == 2" - "result.tags.Name == '{{ instance_id }}-new'" + # Test final snapshot on deletion + - name: Delete the DB instance + rds_instance: + id: "{{ instance_id }}" + state: absent + final_snapshot_identifier: "{{ instance_id }}" + register: result + + - assert: + that: + - result.changed + - "result.final_snapshot.db_instance_identifier == '{{ instance_id }}'" + + - name: Check that snapshot exists + rds_snapshot_info: + db_snapshot_identifier: "{{ instance_id }}" + register: result + + - assert: + that: + - "result.snapshots | length == 1" + - "result.snapshots.0.engine == 'mariadb'" + always: + - name: remove snapshot + rds_instance_snapshot: + db_snapshot_identifier: "{{ tiny_prefix }}-test-snapshot" + state: absent + wait: false + ignore_errors: yes + - name: Remove DB instance rds_instance: id: "{{ instance_id }}" diff --git a/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_upgrade.yml b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_upgrade.yml new file mode 100644 index 00000000000..90833bfd487 --- /dev/null +++ b/tests/integration/targets/rds_instance/roles/rds_instance/tasks/test_upgrade.yml @@ -0,0 +1,82 @@ +--- +- block: + - name: Ensure the resource doesn't exist + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + register: result + + - assert: + that: + - not result.changed + ignore_errors: yes + + - name: Create a mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + + - assert: + that: + - result.changed + - "result.db_instance_identifier == '{{ instance_id }}'" + + # Test upgrade of DB instance + + - name: Uprade a mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version_2 }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + apply_immediately: True + register: result + + - assert: + that: + - 
result.changed + - '"engine_version" in result.pending_modified_values or result.engine_version == mariadb_engine_version_2' + + - name: Idempotence uprading a mariadb instance + rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version_2 }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + retries: 30 + delay: 10 + until: result is not failed + + - assert: + that: + - not result.changed + - '"engine_version" in result.pending_modified_values or result.engine_version == mariadb_engine_version_2' + + always: + - name: Delete the instance + rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: True + wait: false + ignore_errors: yes From 716ae77f71cfa7b6300a057892fea768c81819f9 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Fri, 4 Feb 2022 11:33:23 -0800 Subject: [PATCH 20/31] ec2_placement_group: Add partition strategy and partition count (#872) ec2_placement_group: Add partition strategy and partition count SUMMARY Add partition as a strategy and an option, partition_count to choose the actual number of partitions for the community.aws.ec2_placement_group module. Fixes #808 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_placement_group ADDITIONAL INFO Tested locally with - name: Create a partition placement group with partition count 4. ec2_placement_group: name: my-cluster state: present strategy: partition partition_count: 4 Reviewed-by: Alina Buzachis Reviewed-by: Mandar Kulkarni Reviewed-by: Mark Woolley --- ...ec2_placement_group_partition_strategy.yml | 2 + plugins/modules/ec2_placement_group.py | 34 +- .../targets/ec2_placement_group/aliases | 3 + .../ec2_placement_group/defaults/main.yml | 1 + .../targets/ec2_placement_group/meta/main.yml | 1 + .../ec2_placement_group/tasks/env_cleanup.yml | 94 ++++ .../ec2_placement_group/tasks/env_setup.yml | 64 +++ .../ec2_placement_group/tasks/main.yml | 408 ++++++++++++++++++ .../targets/ec2_placement_group/vars/main.yml | 1 + 9 files changed, 604 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/872-ec2_placement_group_partition_strategy.yml create mode 100644 tests/integration/targets/ec2_placement_group/aliases create mode 100644 tests/integration/targets/ec2_placement_group/defaults/main.yml create mode 100644 tests/integration/targets/ec2_placement_group/meta/main.yml create mode 100644 tests/integration/targets/ec2_placement_group/tasks/env_cleanup.yml create mode 100644 tests/integration/targets/ec2_placement_group/tasks/env_setup.yml create mode 100644 tests/integration/targets/ec2_placement_group/tasks/main.yml create mode 100644 tests/integration/targets/ec2_placement_group/vars/main.yml diff --git a/changelogs/fragments/872-ec2_placement_group_partition_strategy.yml b/changelogs/fragments/872-ec2_placement_group_partition_strategy.yml new file mode 100644 index 00000000000..3fc6d524c8f --- /dev/null +++ b/changelogs/fragments/872-ec2_placement_group_partition_strategy.yml @@ -0,0 +1,2 @@ +minor_changes: +- ec2_placement_group - add support for partition strategy and partition count (https://github.com/ansible-collections/community.aws/pull/872). 
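For reviewers, a minimal usage sketch of the option this patch adds (the group name and partition count below are illustrative and not taken from the patch; parameter names and behaviour are as documented in the module diff that follows):

    - name: Create a partition placement group with 3 partitions
      community.aws.ec2_placement_group:
        name: my-partition-pg          # illustrative name
        state: present
        strategy: partition
        partition_count: 3             # valid values are 1 to 7

    - name: Look up the group and check the reported strategy
      community.aws.ec2_placement_group_info:
        names:
          - my-partition-pg
      register: pg_info

    - assert:
        that:
          - pg_info.placement_groups[0].strategy == "partition"

As the diff below shows, create_placement_group() rejects partition_count when the strategy is anything other than partition, so the two options are only meaningful together.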
diff --git a/plugins/modules/ec2_placement_group.py b/plugins/modules/ec2_placement_group.py index 3ccb2c00802..9ca3bb02ab9 100644 --- a/plugins/modules/ec2_placement_group.py +++ b/plugins/modules/ec2_placement_group.py @@ -23,6 +23,13 @@ - The name for the placement group. required: true type: str + partition_count: + description: + - The number of partitions. + - Valid only when I(Strategy) is set to C(partition). + - Must be a value between C(1) and C(7). + type: int + version_added: 3.1.0 state: description: - Create or delete placement group. @@ -35,7 +42,7 @@ low-latency group in a single Availability Zone, while Spread spreads instances across underlying hardware. default: cluster - choices: [ 'cluster', 'spread' ] + choices: [ 'cluster', 'spread', 'partition' ] type: str extends_documentation_fragment: - amazon.aws.aws @@ -58,6 +65,13 @@ state: present strategy: spread +- name: Create a Partition strategy placement group. + community.aws.ec2_placement_group: + name: my-cluster + state: present + strategy: partition + partition_count: 3 + - name: Delete a placement group. community.aws.ec2_placement_group: name: my-cluster @@ -126,10 +140,21 @@ def get_placement_group_details(connection, module): def create_placement_group(connection, module): name = module.params.get("name") strategy = module.params.get("strategy") + partition_count = module.params.get("partition_count") + + if strategy != 'partition' and partition_count: + module.fail_json( + msg="'partition_count' can only be set when strategy is set to 'partition'.") + + params = {} + params['GroupName'] = name + params['Strategy'] = strategy + if partition_count: + params['PartitionCount'] = partition_count + params['DryRun'] = module.check_mode try: - connection.create_placement_group( - GroupName=name, Strategy=strategy, DryRun=module.check_mode) + connection.create_placement_group(**params) except is_boto3_error_code('DryRunOperation'): module.exit_json(changed=True, placement_group={ "name": name, @@ -165,8 +190,9 @@ def delete_placement_group(connection, module): def main(): argument_spec = dict( name=dict(required=True, type='str'), + partition_count=dict(type='int'), state=dict(default='present', choices=['present', 'absent']), - strategy=dict(default='cluster', choices=['cluster', 'spread']) + strategy=dict(default='cluster', choices=['cluster', 'spread', 'partition']) ) module = AnsibleAWSModule( diff --git a/tests/integration/targets/ec2_placement_group/aliases b/tests/integration/targets/ec2_placement_group/aliases new file mode 100644 index 00000000000..c6944e7b949 --- /dev/null +++ b/tests/integration/targets/ec2_placement_group/aliases @@ -0,0 +1,3 @@ +cloud/aws + +ec2_placement_group_info diff --git a/tests/integration/targets/ec2_placement_group/defaults/main.yml b/tests/integration/targets/ec2_placement_group/defaults/main.yml new file mode 100644 index 00000000000..ed97d539c09 --- /dev/null +++ b/tests/integration/targets/ec2_placement_group/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/tests/integration/targets/ec2_placement_group/meta/main.yml b/tests/integration/targets/ec2_placement_group/meta/main.yml new file mode 100644 index 00000000000..ed97d539c09 --- /dev/null +++ b/tests/integration/targets/ec2_placement_group/meta/main.yml @@ -0,0 +1 @@ +--- diff --git a/tests/integration/targets/ec2_placement_group/tasks/env_cleanup.yml b/tests/integration/targets/ec2_placement_group/tasks/env_cleanup.yml new file mode 100644 index 00000000000..9e5ae6a9380 --- /dev/null +++ 
b/tests/integration/targets/ec2_placement_group/tasks/env_cleanup.yml @@ -0,0 +1,94 @@ +- name: remove any instances in the test VPC + ec2_instance: + filters: + vpc_id: "{{ testing_vpc.vpc.id }}" + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: Get ENIs + ec2_eni_info: + filters: + vpc-id: "{{ testing_vpc.vpc.id }}" + register: enis + +- name: delete all ENIs + ec2_eni: + eni_id: "{{ item.id }}" + state: absent + until: removed is not failed + with_items: "{{ enis.network_interfaces }}" + ignore_errors: yes + retries: 10 + +- name: remove the security group + ec2_group: + name: "{{ resource_prefix }}-sg" + description: a security group for ansible tests + vpc_id: "{{ testing_vpc.vpc.id }}" + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove routing rules + ec2_vpc_route_table: + state: absent + vpc_id: "{{ testing_vpc.vpc.id }}" + tags: + created: "{{ resource_prefix }}-route" + routes: + - dest: 0.0.0.0/0 + gateway_id: "{{ igw.gateway_id }}" + subnets: + - "{{ testing_subnet_a.subnet.id }}" + - "{{ testing_subnet_b.subnet.id }}" + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove internet gateway + ec2_vpc_igw: + vpc_id: "{{ testing_vpc.vpc.id }}" + state: absent + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove subnet A + ec2_vpc_subnet: + state: absent + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: 10.22.32.0/24 + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove subnet B + ec2_vpc_subnet: + state: absent + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: 10.22.33.0/24 + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 + +- name: remove the VPC + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: 10.22.32.0/23 + state: absent + tags: + Name: Ansible Testing VPC + tenancy: default + register: removed + until: removed is not failed + ignore_errors: yes + retries: 10 diff --git a/tests/integration/targets/ec2_placement_group/tasks/env_setup.yml b/tests/integration/targets/ec2_placement_group/tasks/env_setup.yml new file mode 100644 index 00000000000..88f5bb6fe22 --- /dev/null +++ b/tests/integration/targets/ec2_placement_group/tasks/env_setup.yml @@ -0,0 +1,64 @@ +- name: Create VPC for use in testing + ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: 10.22.32.0/23 + tags: + Name: Ansible ec2_lc Testing VPC + tenancy: default + register: testing_vpc + +- name: Create internet gateway for use in testing + ec2_vpc_igw: + vpc_id: "{{ testing_vpc.vpc.id }}" + state: present + tags: + Name: Ansible ec2_lc Testing gateway + register: igw + +- name: Create default subnet in zone A + ec2_vpc_subnet: + state: present + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: 10.22.32.0/24 + az: "{{ aws_region }}a" + resource_tags: + Name: "{{ resource_prefix }}-subnet-a" + register: testing_subnet_a + +- name: Create secondary subnet in zone B + ec2_vpc_subnet: + state: present + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: 10.22.33.0/24 + az: "{{ aws_region }}b" + resource_tags: + Name: "{{ resource_prefix }}-subnet-b" + register: testing_subnet_b + +- name: create routing rules + ec2_vpc_route_table: + vpc_id: "{{ testing_vpc.vpc.id }}" + tags: + created: "{{ resource_prefix }}-route" + routes: + - dest: 0.0.0.0/0 + gateway_id: "{{ igw.gateway_id }}" + subnets: + - "{{ 
testing_subnet_a.subnet.id }}" + - "{{ testing_subnet_b.subnet.id }}" + +- name: create a security group with the vpc + ec2_group: + name: "{{ resource_prefix }}-sg" + description: a security group for ansible tests + vpc_id: "{{ testing_vpc.vpc.id }}" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + register: sg diff --git a/tests/integration/targets/ec2_placement_group/tasks/main.yml b/tests/integration/targets/ec2_placement_group/tasks/main.yml new file mode 100644 index 00000000000..91fd9497c12 --- /dev/null +++ b/tests/integration/targets/ec2_placement_group/tasks/main.yml @@ -0,0 +1,408 @@ +- name: run ec2_placement_group tests + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + vars: + placement_group_names: [] + + block: + + - name: set up environment for testing. + include_tasks: env_setup.yml + + - name: Create a placement group 1 - check_mode + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg1' + state: present + check_mode: true + register: pg_1_create_check_mode + + - assert: + that: + - pg_1_create_check_mode is changed + - pg_1_create_check_mode.placement_group.name == '{{ resource_prefix }}-pg1' + - pg_1_create_check_mode.placement_group.state == "DryRun" + - '"ec2:CreatePlacementGroup" in pg_1_create_check_mode.resource_actions' + + - name: Create a placement group 1 + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg1' + state: present + register: pg_1_create + + - set_fact: + placement_group_names: "{{ placement_group_names + [pg_1_create.placement_group.name] }}" + + - assert: + that: + - pg_1_create is changed + - pg_1_create.placement_group.name == '{{ resource_prefix }}-pg1' + - pg_1_create.placement_group.state == "available" + - '"ec2:CreatePlacementGroup" in pg_1_create.resource_actions' + + - name: Gather information about placement group 1 + community.aws.ec2_placement_group_info: + names: + - '{{ resource_prefix }}-pg1' + register: pg_1_info_result + + - assert: + that: + - pg_1_info_result is not changed + - pg_1_info_result.placement_groups[0].name == '{{ resource_prefix }}-pg1' + - pg_1_info_result.placement_groups[0].state == "available" + - pg_1_info_result.placement_groups[0].strategy == "cluster" + - '"ec2:DescribePlacementGroups" in pg_1_info_result.resource_actions' + + - name: Create a placement group 1 - Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg1' + state: present + register: pg_1_create + + - assert: + that: + - pg_1_create is not changed + - pg_1_create.placement_group.name == '{{ resource_prefix }}-pg1' + - pg_1_create.placement_group.state == "available" + - '"ec2:CreatePlacementGroup" not in pg_1_create.resource_actions' + + - name: Create a placement group 1 - check_mode Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg1' + state: present + check_mode: true + register: pg_1_create_check_mode_idem + + - assert: + that: + - pg_1_create_check_mode_idem is not changed + - pg_1_create_check_mode_idem.placement_group.name == '{{ resource_prefix }}-pg1' + - pg_1_create_check_mode_idem.placement_group.state == "available" + - '"ec2:CreatePlacementGroup" not in pg_1_create_check_mode_idem.resource_actions' + + - name: Create a placement group 2 - check_mode + 
community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg2' + state: present + strategy: spread + check_mode: true + register: pg_2_create_check_mode + + - assert: + that: + - pg_2_create_check_mode is changed + - pg_2_create_check_mode.placement_group.name == '{{ resource_prefix }}-pg2' + - pg_2_create_check_mode.placement_group.state == "DryRun" + - '"ec2:CreatePlacementGroup" in pg_2_create_check_mode.resource_actions' + + - name: Create a placement group 2 with spread strategy + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg2' + state: present + strategy: spread + register: pg_2_create + + - assert: + that: + - pg_2_create is changed + - pg_2_create.placement_group.name == '{{ resource_prefix }}-pg2' + - pg_2_create.placement_group.state == "available" + - '"ec2:CreatePlacementGroup" in pg_2_create.resource_actions' + + - set_fact: + placement_group_names: "{{ placement_group_names + [pg_2_create.placement_group.name] }}" + + - name: Gather information about placement group 2 + community.aws.ec2_placement_group_info: + names: + - '{{ resource_prefix }}-pg2' + register: pg_2_info_result + + - assert: + that: + - pg_2_info_result is not changed + - pg_2_info_result.placement_groups[0].name == '{{ resource_prefix }}-pg2' + - pg_2_info_result.placement_groups[0].state == "available" + - pg_2_info_result.placement_groups[0].strategy == "spread" + - '"ec2:DescribePlacementGroups" in pg_2_info_result.resource_actions' + + - name: Create a placement group 2 with spread strategy - Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg2' + state: present + strategy: spread + register: pg_2_create + + - assert: + that: + - pg_2_create is not changed + - pg_2_create.placement_group.name == '{{ resource_prefix }}-pg2' + - pg_2_create.placement_group.state == "available" + - '"ec2:CreatePlacementGroup" not in pg_2_create.resource_actions' + + - name: Create a placement group 2 - check_mode Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg2' + state: present + strategy: spread + check_mode: true + register: pg_2_create_check_mode_idem + + - assert: + that: + - pg_2_create_check_mode_idem is not changed + - pg_2_create_check_mode_idem.placement_group.name == '{{ resource_prefix }}-pg2' + - pg_2_create_check_mode_idem.placement_group.state == "available" + - '"ec2:CreatePlacementGroup" not in pg_2_create_check_mode_idem.resource_actions' + + - name: Create a placement group 3 - check_mode + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg3' + state: present + strategy: partition + partition_count: 4 + check_mode: true + register: pg_3_create_check_mode + + - assert: + that: + - pg_3_create_check_mode is changed + - pg_3_create_check_mode.placement_group.name == '{{ resource_prefix }}-pg3' + - pg_3_create_check_mode.placement_group.state == "DryRun" + - '"ec2:CreatePlacementGroup" in pg_3_create_check_mode.resource_actions' + + - name: Create a placement group 3 with Partition strategy + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg3' + state: present + strategy: partition + partition_count: 4 + register: pg_3_create + + - assert: + that: + - pg_3_create is changed + - pg_3_create.placement_group.name == '{{ resource_prefix }}-pg3' + - pg_3_create.placement_group.state == "available" + - '"ec2:CreatePlacementGroup" in pg_3_create.resource_actions' + + - set_fact: + placement_group_names: "{{ placement_group_names + [pg_3_create.placement_group.name] }}" + + + - name: 
Gather information about placement group 3 + community.aws.ec2_placement_group_info: + names: + - '{{ resource_prefix }}-pg3' + register: pg_3_info_result + + - assert: + that: + - pg_3_info_result is not changed + - pg_3_info_result.placement_groups[0].name == '{{ resource_prefix }}-pg3' + - pg_3_info_result.placement_groups[0].state == "available" + - pg_3_info_result.placement_groups[0].strategy == "partition" + - '"ec2:DescribePlacementGroups" in pg_3_info_result.resource_actions' + + - name: Create a placement group 3 with Partition strategy - Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg3' + state: present + strategy: partition + partition_count: 4 + register: pg_3_create + + - assert: + that: + - pg_3_create is not changed + - pg_3_create.placement_group.name == '{{ resource_prefix }}-pg3' + - pg_3_create.placement_group.state == "available" + - '"ec2:CreatePlacementGroup" not in pg_3_create.resource_actions' + + - name: Create a placement group 3 - check_mode Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg3' + state: present + strategy: partition + partition_count: 4 + check_mode: true + register: pg_3_create_check_mode_idem + + - assert: + that: + - pg_3_create_check_mode_idem is not changed + - pg_3_create_check_mode_idem.placement_group.name == '{{ resource_prefix }}-pg3' + - pg_3_create_check_mode_idem.placement_group.state == "available" + - '"ec2:CreatePlacementGroup" not in pg_3_create_check_mode_idem.resource_actions' + + - name: List all placement groups. + community.aws.ec2_placement_group_info: + register: all_ec2_placement_groups + +# Delete Placement Group ========================================== + + # On using check_mode for delete placement group operation + # If operation would have succeeded, the error response is DryRunOperation. + # Otherwise, it is UnauthorizedOperation . 
+ - name: Delete a placement group 1 - check_mode + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg1' + state: absent + check_mode: true + register: pg_1_delete_check_mode + ignore_errors: true + + - assert: + that: + - pg_1_delete_check_mode is not changed + - pg_1_delete_check_mode.error.code == 'DryRunOperation' + - '"ec2:DeletePlacementGroup" in pg_1_delete_check_mode.resource_actions' + + - name: Delete a placement group 1 + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg1' + state: absent + register: pg_1_delete + + - assert: + that: + - pg_1_delete is changed + - '"ec2:DeletePlacementGroup" in pg_1_delete.resource_actions' + + - name: Delete a placement group 1 - Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg1' + state: absent + register: pg_1_delete + + - assert: + that: + - pg_1_delete is not changed + - '"ec2:DeletePlacementGroup" not in pg_1_delete.resource_actions' + + - name: Delete a placement group 1 - check_mode Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg1' + state: absent + check_mode: true + register: pg_1_delete_check_mode_idem + ignore_errors: true + + - assert: + that: + - pg_1_delete_check_mode_idem is not changed + - '"ec2:DeletePlacementGroup" not in pg_1_delete_check_mode_idem.resource_actions' + + - name: Delete a placement group 2 - check_mode + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg2' + state: absent + check_mode: true + register: pg_2_delete_check_mode + ignore_errors: true + + - assert: + that: + - pg_2_delete_check_mode is not changed + - pg_2_delete_check_mode.error.code == 'DryRunOperation' + - '"ec2:DeletePlacementGroup" in pg_2_delete_check_mode.resource_actions' + + - name: Delete a placement group 2 + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg2' + state: absent + register: pg_2_delete + + - assert: + that: + - pg_2_delete is changed + - '"ec2:DeletePlacementGroup" in pg_2_delete.resource_actions' + + - name: Delete a placement group 2 - Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg2' + state: absent + register: pg_2_delete + + - assert: + that: + - pg_2_delete is not changed + - '"ec2:DeletePlacementGroup" not in pg_2_delete.resource_actions' + + - name: Delete a placement group 2 - check_mode Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg2' + state: absent + check_mode: true + register: pg_2_delete_check_mode_idem + ignore_errors: true + + - assert: + that: + - pg_2_delete_check_mode_idem is not changed + - '"ec2:DeletePlacementGroup" not in pg_2_delete_check_mode_idem.resource_actions' + + - name: Delete a placement group 3 - check_mode + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg3' + state: absent + check_mode: true + register: pg_3_delete_check_mode + ignore_errors: true + + - assert: + that: + - pg_3_delete_check_mode is not changed + - pg_3_delete_check_mode.error.code == 'DryRunOperation' + - '"ec2:DeletePlacementGroup" in pg_3_delete_check_mode.resource_actions' + + - name: Delete a placement group 3 + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg3' + state: absent + register: pg_3_delete + + - assert: + that: + - pg_3_delete is changed + - '"ec2:DeletePlacementGroup" in pg_3_delete.resource_actions' + + - name: Delete a placement group 3 - Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg3' + state: absent + register: 
pg_3_delete + + - assert: + that: + - pg_3_delete is not changed + - '"ec2:DeletePlacementGroup" not in pg_3_delete.resource_actions' + + - name: Delete a placement group 3 - check_mode Idempotency + community.aws.ec2_placement_group: + name: '{{ resource_prefix }}-pg3' + state: absent + check_mode: true + register: pg_3_delete_check_mode_idem + ignore_errors: true + + - assert: + that: + - pg_3_delete_check_mode_idem is not changed + - '"ec2:DeletePlacementGroup" not in pg_3_delete_check_mode_idem.resource_actions' + + always: + + - name: Make sure placement groups created during test are deleted + community.aws.ec2_placement_group: + name: '{{ item }}' + state: absent + with_items: '{{ placement_group_names }}' + + - include_tasks: env_cleanup.yml diff --git a/tests/integration/targets/ec2_placement_group/vars/main.yml b/tests/integration/targets/ec2_placement_group/vars/main.yml new file mode 100644 index 00000000000..ed97d539c09 --- /dev/null +++ b/tests/integration/targets/ec2_placement_group/vars/main.yml @@ -0,0 +1 @@ +--- From 15d69c91b1d40b0dfb1c98f11302f4f960864111 Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Mon, 7 Feb 2022 17:40:48 +0000 Subject: [PATCH 21/31] Add dynamodb table class support (#880) Add dynamodb table class support SUMMARY Add support for defining a TableClass on DynamoDB tables. TableClass was introduced as part of botocore version 1.23.18 https://github.com/boto/botocore/blob/develop/CHANGELOG.rst#12318 Fixes: #829 ISSUE TYPE Feature Pull Request COMPONENT NAME dynamodb_table ADDITIONAL INFORMATION https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.create_table https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.update_table Reviewed-by: Alina Buzachis Reviewed-by: Mark Woolley Reviewed-by: Jill R --- .../fragments/880-add-table-class-param.yml | 2 + plugins/modules/dynamodb_table.py | 70 +++++++++++++++++-- .../targets/dynamodb_table/meta/main.yml | 4 +- .../targets/dynamodb_table/tasks/main.yml | 52 +++++++++++++- 4 files changed, 122 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/880-add-table-class-param.yml diff --git a/changelogs/fragments/880-add-table-class-param.yml b/changelogs/fragments/880-add-table-class-param.yml new file mode 100644 index 00000000000..21d08216cd6 --- /dev/null +++ b/changelogs/fragments/880-add-table-class-param.yml @@ -0,0 +1,2 @@ +minor_changes: + - dynamodb_table - the ``table_class`` parameter has been added (https://github.com/ansible-collections/community.aws/pull/880). diff --git a/plugins/modules/dynamodb_table.py b/plugins/modules/dynamodb_table.py index 1ea4391223c..839178256aa 100644 --- a/plugins/modules/dynamodb_table.py +++ b/plugins/modules/dynamodb_table.py @@ -121,6 +121,13 @@ default: [] type: list elements: dict + table_class: + description: + - The class of the table. + - Requires at least botocore version 1.23.18. + choices: ['STANDARD', 'STANDARD_INFREQUENT_ACCESS'] + type: str + version_added: 3.1.0 tags: description: - A hash/dictionary of tags to add to the new instance or for starting/stopping instance by tag. @@ -201,11 +208,49 @@ ''' RETURN = r''' +table: + description: The returned table params from the describe API call. 
+ returned: success + type: complex + contains: {} + sample: { + "arn": "arn:aws:dynamodb:us-east-1:721066863947:table/ansible-test-table", + "attribute_definitions": [ + { + "attribute_name": "id", + "attribute_type": "N" + } + ], + "billing_mode": "PROVISIONED", + "creation_date_time": "2022-02-04T13:36:01.578000+00:00", + "id": "533b45fe-0870-4b66-9b00-d2afcfe96f19", + "item_count": 0, + "key_schema": [ + { + "attribute_name": "id", + "key_type": "HASH" + } + ], + "name": "ansible-test-14482047-alinas-mbp", + "provisioned_throughput": { + "number_of_decreases_today": 0, + "read_capacity_units": 1, + "write_capacity_units": 1 + }, + "size": 0, + "status": "ACTIVE", + "table_arn": "arn:aws:dynamodb:us-east-1:721066863947:table/ansible-test-table", + "table_id": "533b45fe-0870-4b66-9b00-d2afcfe96f19", + "table_name": "ansible-test-table", + "table_size_bytes": 0, + "table_status": "ACTIVE", + "tags": {} + } table_status: - description: The current status of the table. - returned: success - type: str - sample: ACTIVE + description: The current status of the table. + returned: success + type: str + sample: ACTIVE ''' try: @@ -410,6 +455,7 @@ def compatability_results(current_table): billing_mode=billing_mode, region=module.region, table_name=current_table.get('table_name', None), + table_class=current_table.get('table_class_summary', {}).get('table_class', None), table_status=current_table.get('table_status', None), tags=current_table.get('tags', {}), ) @@ -452,6 +498,9 @@ def get_dynamodb_table(): table['size'] = table['table_size_bytes'] table['tags'] = tags + if 'table_class_summary' in table: + table['table_class'] = table['table_class_summary']['table_class'] + # billing_mode_summary doesn't always seem to be set but is always set for PAY_PER_REQUEST # and when updating the billing_mode if 'billing_mode_summary' in table: @@ -753,6 +802,7 @@ def _update_table(current_table): changes = dict() additional_global_index_changes = list() + # Get throughput / billing_mode changes throughput_changes = _throughput_changes(current_table) if throughput_changes: changes['ProvisionedThroughput'] = throughput_changes @@ -766,6 +816,11 @@ def _update_table(current_table): if current_billing_mode != new_billing_mode: changes['BillingMode'] = new_billing_mode + # Update table_class use exisiting if none is defined + if module.params.get('table_class'): + if module.params.get('table_class') != current_table.get('table_class'): + changes['TableClass'] = module.params.get('table_class') + global_index_changes = _global_index_changes(current_table) if global_index_changes: changes['GlobalSecondaryIndexUpdates'] = global_index_changes @@ -868,6 +923,7 @@ def update_table(current_table): def create_table(): table_name = module.params.get('name') + table_class = module.params.get('table_class') hash_key_name = module.params.get('hash_key_name') billing_mode = module.params.get('billing_mode') @@ -901,6 +957,8 @@ def create_table(): # SSESpecification, ) + if table_class: + params['TableClass'] = table_class if billing_mode == "PROVISIONED": params['ProvisionedThroughput'] = throughput if local_indexes: @@ -982,6 +1040,7 @@ def main(): read_capacity=dict(type='int'), write_capacity=dict(type='int'), indexes=dict(default=[], type='list', elements='dict', options=index_options), + table_class=dict(type='str', choices=['STANDARD', 'STANDARD_INFREQUENT_ACCESS']), tags=dict(type='dict'), purge_tags=dict(type='bool', default=True), wait=dict(type='bool', default=True), @@ -999,6 +1058,9 @@ def main(): ) client = 
module.client('dynamodb', retry_decorator=retry_decorator) + if module.params.get('table_class'): + module.require_botocore_at_least('1.23.18', reason='to set table_class') + current_table = get_dynamodb_table() changed = False table = None diff --git a/tests/integration/targets/dynamodb_table/meta/main.yml b/tests/integration/targets/dynamodb_table/meta/main.yml index 07faa217762..504e72117b6 100644 --- a/tests/integration/targets/dynamodb_table/meta/main.yml +++ b/tests/integration/targets/dynamodb_table/meta/main.yml @@ -1,2 +1,4 @@ dependencies: - - prepare_tests + - role: setup_botocore_pip + vars: + botocore_version: "1.23.18" diff --git a/tests/integration/targets/dynamodb_table/tasks/main.yml b/tests/integration/targets/dynamodb_table/tasks/main.yml index cd41e031d64..21c7f465b20 100644 --- a/tests/integration/targets/dynamodb_table/tasks/main.yml +++ b/tests/integration/targets/dynamodb_table/tasks/main.yml @@ -574,7 +574,6 @@ - delete_table is not changed # ============================================== - - name: Create complex table - check_mode dynamodb_table: state: present @@ -585,9 +584,12 @@ range_key_type: "{{ range_index_type }}" read_capacity: 3 write_capacity: 3 + table_class: "STANDARD_INFREQUENT_ACCESS" tags: "{{ tags_default }}" indexes: "{{ indexes }}" register: create_complex_table + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" check_mode: True - name: Check results - Create complex table - check_mode @@ -606,9 +608,12 @@ range_key_type: "{{ range_index_type }}" read_capacity: 3 write_capacity: 3 + table_class: "STANDARD_INFREQUENT_ACCESS" tags: "{{ tags_default }}" indexes: "{{ indexes }}" register: create_complex_table + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - name: Check results - Create complex table assert: @@ -633,6 +638,7 @@ - create_complex_table.range_key_type == range_index_type - create_complex_table.read_capacity == 3 - create_complex_table.table_name == table_name + - create_complex_table.table_class == "STANDARD_INFREQUENT_ACCESS" - create_complex_table.write_capacity == 3 - create_complex_table.tags == tags_default @@ -646,9 +652,12 @@ range_key_type: "{{ range_index_type }}" read_capacity: 3 write_capacity: 3 + table_class: "STANDARD_INFREQUENT_ACCESS" tags: "{{ tags_default }}" indexes: "{{ indexes }}" register: create_complex_table + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" check_mode: True - name: Check results - Create complex table - idempotent - check_mode @@ -667,9 +676,12 @@ range_key_type: "{{ range_index_type }}" read_capacity: 3 write_capacity: 3 + table_class: "STANDARD_INFREQUENT_ACCESS" tags: "{{ tags_default }}" indexes: "{{ indexes }}" register: create_complex_table + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - name: Check results - Create complex table - idempotent assert: @@ -694,10 +706,48 @@ - create_complex_table.range_key_type == range_index_type - create_complex_table.read_capacity == 3 - create_complex_table.table_name == table_name + - create_complex_table.table_class == "STANDARD_INFREQUENT_ACCESS" - create_complex_table.write_capacity == 3 - create_complex_table.tags == tags_default # ============================================== + # Update table class on exisiting table + + - name: Update table class - check_mode + dynamodb_table: + state: present + name: "{{ table_name }}" + table_class: "STANDARD" + register: update_class + vars: + ansible_python_interpreter: "{{ 
botocore_virtualenv_interpreter }}" + check_mode: True + + - name: Check results - Update table class - check_mode + assert: + that: + - update_class is successful + - update_class is changed + + - name: Update table class + dynamodb_table: + state: present + name: "{{ table_name }}" + table_class: "STANDARD" + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + register: update_class + + - name: Check results - Update table class + assert: + that: + - update_class is successful + - update_class is changed + - update_class.table_name == table_name + - update_class.table_class == "STANDARD" + + # ============================================== + # Update table index on exisiting table - name: Update table update index - check_mode dynamodb_table: From 239136bc63e2d771eff9af0e0108ee50956a845f Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 9 Feb 2022 17:44:52 -0500 Subject: [PATCH 22/31] add check_mode for elb_application_lb* & refactor integration tests (#894) add check_mode for elb_application_lb* & refactor integration tests SUMMARY Add check_mode support for elb_application_lb* & refactor integration tests. ISSUE TYPE Feature Pull Request COMPONENT NAME elb_application_lb elb_application_lb_info Reviewed-by: Alina Buzachis Reviewed-by: Joseph Torcasso Reviewed-by: Jill R Reviewed-by: Mark Woolley --- .../894-add-check_mode-elb_application_lb.yml | 3 + plugins/modules/elb_application_lb.py | 252 ++-- plugins/modules/elb_application_lb_info.py | 135 +- .../targets/elb_application_lb/aliases | 1 + .../elb_application_lb/defaults/main.yml | 10 + .../elb_application_lb/tasks/full_test.yml | 186 --- .../targets/elb_application_lb/tasks/main.yml | 1170 ++++++++++++++++- .../tasks/test_alb_bad_listener_options.yml | 68 - .../test_alb_ip_address_type_options.yml | 93 -- .../tasks/test_alb_tags.yml | 78 -- .../tasks/test_alb_with_asg.yml | 73 - .../tasks/test_creating_alb.yml | 41 - .../tasks/test_deleting_alb.yml | 37 - .../tasks/test_modifying_alb_listeners.yml | 222 ---- .../tasks/test_multiple_actions.yml | 447 ------- .../targets/elb_application_lb_info/aliases | 1 - .../elb_application_lb_info/defaults/main.yml | 4 - .../elb_application_lb_info/meta/main.yml | 2 - .../tasks/full_test.yml | 11 - .../elb_application_lb_info/tasks/main.yml | 11 - .../elb_application_lb_info/tasks/setup.yml | 84 -- .../tasks/teardown.yml | 83 -- .../tasks/test_elb_application_lb_info.yml | 41 - 23 files changed, 1441 insertions(+), 1612 deletions(-) create mode 100644 changelogs/fragments/894-add-check_mode-elb_application_lb.yml delete mode 100644 tests/integration/targets/elb_application_lb/tasks/full_test.yml delete mode 100644 tests/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml delete mode 100644 tests/integration/targets/elb_application_lb/tasks/test_alb_ip_address_type_options.yml delete mode 100644 tests/integration/targets/elb_application_lb/tasks/test_alb_tags.yml delete mode 100644 tests/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml delete mode 100644 tests/integration/targets/elb_application_lb/tasks/test_creating_alb.yml delete mode 100644 tests/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml delete mode 100644 tests/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml delete mode 100644 tests/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml delete mode 100644 
tests/integration/targets/elb_application_lb_info/aliases delete mode 100644 tests/integration/targets/elb_application_lb_info/defaults/main.yml delete mode 100644 tests/integration/targets/elb_application_lb_info/meta/main.yml delete mode 100644 tests/integration/targets/elb_application_lb_info/tasks/full_test.yml delete mode 100644 tests/integration/targets/elb_application_lb_info/tasks/main.yml delete mode 100644 tests/integration/targets/elb_application_lb_info/tasks/setup.yml delete mode 100644 tests/integration/targets/elb_application_lb_info/tasks/teardown.yml delete mode 100644 tests/integration/targets/elb_application_lb_info/tasks/test_elb_application_lb_info.yml diff --git a/changelogs/fragments/894-add-check_mode-elb_application_lb.yml b/changelogs/fragments/894-add-check_mode-elb_application_lb.yml new file mode 100644 index 00000000000..4c6a4dd935b --- /dev/null +++ b/changelogs/fragments/894-add-check_mode-elb_application_lb.yml @@ -0,0 +1,3 @@ +minor_changes: + - elb_application_lb - add check_mode support and refactor integration tests (https://github.com/ansible-collections/community.aws/pull/894) + - elb_application_lb_info - update documentation and refactor integration tests (https://github.com/ansible-collections/community.aws/pull/894) \ No newline at end of file diff --git a/plugins/modules/elb_application_lb.py b/plugins/modules/elb_application_lb.py index 4b547ace1c2..32c0f28bd95 100644 --- a/plugins/modules/elb_application_lb.py +++ b/plugins/modules/elb_application_lb.py @@ -48,7 +48,7 @@ type: str deletion_protection: description: - - Indicates whether deletion protection for the ELB is enabled. + - Indicates whether deletion protection for the ALB is enabled. - Defaults to C(false). type: bool http2: @@ -62,7 +62,7 @@ type: int listeners: description: - - A list of dicts containing listeners to attach to the ELB. See examples for detail of the dict required. Note that listener keys + - A list of dicts containing listeners to attach to the ALB. See examples for detail of the dict required. Note that listener keys are CamelCased. type: list elements: dict @@ -123,7 +123,7 @@ type: str purge_listeners: description: - - If C(yes), existing listeners will be purged from the ELB to match exactly what is defined by I(listeners) parameter. + - If C(yes), existing listeners will be purged from the ALB to match exactly what is defined by I(listeners) parameter. - If the I(listeners) parameter is not set then listeners will not be modified. default: yes type: bool @@ -149,7 +149,7 @@ elements: str scheme: description: - - Internet-facing or internal load balancer. An ELB scheme can not be modified after creation. + - Internet-facing or internal load balancer. An ALB scheme can not be modified after creation. default: internet-facing choices: [ 'internet-facing', 'internal' ] type: str @@ -195,9 +195,9 @@ EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. -# Create an ELB and attach a listener +# Create an ALB and attach a listener - community.aws.elb_application_lb: - name: myelb + name: myalb security_groups: - sg-12345678 - my-sec-group @@ -216,12 +216,12 @@ TargetGroupName: # Required. 
The name of the target group state: present -# Create an ELB and attach a listener with logging enabled +# Create an ALB and attach a listener with logging enabled - community.aws.elb_application_lb: access_logs_enabled: yes access_logs_s3_bucket: mybucket access_logs_s3_prefix: "logs" - name: myelb + name: myalb security_groups: - sg-12345678 - my-sec-group @@ -303,9 +303,9 @@ Type: forward state: present -# Remove an ELB +# Remove an ALB - community.aws.elb_application_lb: - name: myelb + name: myalb state: absent ''' @@ -315,27 +315,32 @@ description: The name of the S3 bucket for the access logs. returned: when state is present type: str - sample: mys3bucket + sample: "mys3bucket" access_logs_s3_enabled: description: Indicates whether access logs stored in Amazon S3 are enabled. returned: when state is present - type: str + type: bool sample: true access_logs_s3_prefix: description: The prefix for the location in the S3 bucket. returned: when state is present type: str - sample: my/logs + sample: "my/logs" availability_zones: description: The Availability Zones for the load balancer. returned: when state is present type: list - sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]" + sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }] canonical_hosted_zone_id: description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. returned: when state is present type: str - sample: ABCDEF12345678 + sample: "ABCDEF12345678" +changed: + description: Whether an ALB was created/updated/deleted + returned: always + type: bool + sample: true created_time: description: The date and time the load balancer was created. returned: when state is present @@ -344,23 +349,23 @@ deletion_protection_enabled: description: Indicates whether deletion protection is enabled. returned: when state is present - type: str + type: bool sample: true dns_name: description: The public DNS name of the load balancer. returned: when state is present type: str - sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com + sample: "internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com" idle_timeout_timeout_seconds: description: The idle timeout value, in seconds. returned: when state is present type: int sample: 60 ip_address_type: - description: The type of IP addresses used by the subnets for the load balancer. + description: The type of IP addresses used by the subnets for the load balancer. returned: when state is present type: str - sample: ipv4 + sample: "ipv4" listeners: description: Information about the listeners. returned: when state is present @@ -385,7 +390,7 @@ description: The protocol for connections from clients to the load balancer. returned: when state is present type: str - sample: HTTPS + sample: "HTTPS" certificates: description: The SSL server certificate. returned: when state is present @@ -420,22 +425,42 @@ description: The Amazon Resource Name (ARN) of the load balancer. returned: when state is present type: str - sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 + sample: "arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-alb/001122334455" load_balancer_name: description: The name of the load balancer. returned: when state is present type: str - sample: my-elb + sample: "my-alb" routing_http2_enabled: description: Indicates whether HTTP/2 is enabled. 
returned: when state is present - type: str + type: bool sample: true +routing_http_desync_mitigation_mode: + description: Determines how the load balancer handles requests that might pose a security risk to an application. + returned: when state is present + type: str + sample: "defensive" +routing_http_drop_invalid_header_fields_enabled: + description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). + returned: when state is present + type: bool + sample: false +routing_http_x_amzn_tls_version_and_cipher_suite_enabled: + description: Indicates whether the two headers are added to the client request before sending it to the target. + returned: when state is present + type: bool + sample: false +routing_http_xff_client_port_enabled: + description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. + returned: when state is present + type: bool + sample: false scheme: description: Internet-facing or internal load balancer. returned: when state is present type: str - sample: internal + sample: "internal" security_groups: description: The IDs of the security groups for the load balancer. returned: when state is present @@ -445,29 +470,35 @@ description: The state of the load balancer. returned: when state is present type: dict - sample: "{'code': 'active'}" + sample: {'code': 'active'} tags: description: The tags attached to the load balancer. returned: when state is present type: dict - sample: "{ + sample: { 'Tag': 'Example' - }" + } type: description: The type of load balancer. returned: when state is present type: str - sample: application + sample: "application" vpc_id: description: The ID of the VPC for the load balancer. returned: when state is present type: str - sample: vpc-0011223344 + sample: "vpc-0011223344" +waf_fail_open_enabled: + description: Indicates whether to allow a AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. + returned: when state is present + type: bool + sample: false ''' from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict, compare_aws_tags - +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ( ApplicationLoadBalancer, ELBListener, @@ -478,134 +509,170 @@ from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules -def create_or_update_elb(elb_obj): - """Create ELB or modify main attributes. json_exit here""" - if elb_obj.elb: - # ELB exists so check subnets, security groups and tags match what has been passed - +def create_or_update_alb(alb_obj): + """Create ALB or modify main attributes. 
json_exit here""" + if alb_obj.elb: + # ALB exists so check subnets, security groups and tags match what has been passed # Subnets - if not elb_obj.compare_subnets(): - elb_obj.modify_subnets() + if not alb_obj.compare_subnets(): + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.modify_subnets() # Security Groups - if not elb_obj.compare_security_groups(): - elb_obj.modify_security_groups() + if not alb_obj.compare_security_groups(): + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.modify_security_groups() # Tags - only need to play with tags if tags parameter has been set to something - if elb_obj.tags is not None: + if alb_obj.tags is not None: + + tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(alb_obj.elb['tags']), + boto3_tag_list_to_ansible_dict(alb_obj.tags), alb_obj.purge_tags) + + # Exit on check_mode + if alb_obj.module.check_mode and (tags_need_modify or tags_to_delete): + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') # Delete necessary tags - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(elb_obj.elb['tags']), - boto3_tag_list_to_ansible_dict(elb_obj.tags), elb_obj.purge_tags) if tags_to_delete: - elb_obj.delete_tags(tags_to_delete) + alb_obj.delete_tags(tags_to_delete) # Add/update tags if tags_need_modify: - elb_obj.modify_tags() + alb_obj.modify_tags() else: # Create load balancer - elb_obj.create_elb() + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have created ALB if not in check mode.') + alb_obj.create_elb() - # ELB attributes - elb_obj.update_elb_attributes() - elb_obj.modify_elb_attributes() + # ALB attributes + alb_obj.update_elb_attributes() + alb_obj.modify_elb_attributes() # Listeners - listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn']) - + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() + # Exit on check_mode + if alb_obj.module.check_mode and (listeners_to_add or listeners_to_modify or listeners_to_delete): + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + # Delete listeners for listener_to_delete in listeners_to_delete: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) listener_obj.delete() listeners_obj.changed = True # Add listeners for listener_to_add in listeners_to_add: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_add, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_add, alb_obj.elb['LoadBalancerArn']) listener_obj.add() listeners_obj.changed = True # Modify listeners for listener_to_modify in listeners_to_modify: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_modify, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_modify, alb_obj.elb['LoadBalancerArn']) listener_obj.modify() listeners_obj.changed = True - # If listeners changed, mark ELB as 
changed + # If listeners changed, mark ALB as changed if listeners_obj.changed: - elb_obj.changed = True + alb_obj.changed = True # Rules of each listener for listener in listeners_obj.listeners: if 'Rules' in listener: - rules_obj = ELBListenerRules(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port']) - + rules_obj = ELBListenerRules(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port']) rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules() + # Exit on check_mode + if alb_obj.module.check_mode and (rules_to_add or rules_to_modify or rules_to_delete): + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + # Delete rules - if elb_obj.module.params['purge_rules']: + if alb_obj.module.params['purge_rules']: for rule in rules_to_delete: - rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn) + rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn) rule_obj.delete() - elb_obj.changed = True + alb_obj.changed = True # Add rules for rule in rules_to_add: - rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn) + rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn) rule_obj.create() - elb_obj.changed = True + alb_obj.changed = True # Modify rules for rule in rules_to_modify: - rule_obj = ELBListenerRule(elb_obj.connection, elb_obj.module, rule, rules_obj.listener_arn) + rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, rule, rules_obj.listener_arn) rule_obj.modify() - elb_obj.changed = True + alb_obj.changed = True + + # Update ALB ip address type only if option has been provided + if alb_obj.module.params.get('ip_address_type') and alb_obj.elb_ip_addr_type != alb_obj.module.params.get('ip_address_type'): + # Exit on check_mode + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') - # Update ELB ip address type only if option has been provided - if elb_obj.module.params.get('ip_address_type') is not None: - elb_obj.modify_ip_address_type(elb_obj.module.params.get('ip_address_type')) - # Get the ELB again - elb_obj.update() + alb_obj.modify_ip_address_type(alb_obj.module.params.get('ip_address_type')) - # Get the ELB listeners again + # Exit on check_mode - no changes + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - no changes to make to ALB specified.') + + # Get the ALB again + alb_obj.update() + + # Get the ALB listeners again listeners_obj.update() - # Update the ELB attributes - elb_obj.update_elb_attributes() + # Update the ALB attributes + alb_obj.update_elb_attributes() # Convert to snake_case and merge in everything we want to return to the user - snaked_elb = camel_dict_to_snake_dict(elb_obj.elb) - snaked_elb.update(camel_dict_to_snake_dict(elb_obj.elb_attributes)) - snaked_elb['listeners'] = [] + snaked_alb = camel_dict_to_snake_dict(alb_obj.elb) + snaked_alb.update(camel_dict_to_snake_dict(alb_obj.elb_attributes)) + snaked_alb['listeners'] = [] for listener in listeners_obj.current_listeners: # For each listener, get listener rules - listener['rules'] = get_elb_listener_rules(elb_obj.connection, elb_obj.module, listener['ListenerArn']) - snaked_elb['listeners'].append(camel_dict_to_snake_dict(listener)) + 
listener['rules'] = get_elb_listener_rules(alb_obj.connection, alb_obj.module, listener['ListenerArn']) + snaked_alb['listeners'].append(camel_dict_to_snake_dict(listener)) # Change tags to ansible friendly dict - snaked_elb['tags'] = boto3_tag_list_to_ansible_dict(snaked_elb['tags']) + snaked_alb['tags'] = boto3_tag_list_to_ansible_dict(snaked_alb['tags']) # ip address type - snaked_elb['ip_address_type'] = elb_obj.get_elb_ip_address_type() + snaked_alb['ip_address_type'] = alb_obj.get_elb_ip_address_type() + + alb_obj.module.exit_json(changed=alb_obj.changed, **snaked_alb) - elb_obj.module.exit_json(changed=elb_obj.changed, **snaked_elb) +def delete_alb(alb_obj): -def delete_elb(elb_obj): + if alb_obj.elb: - if elb_obj.elb: - listeners_obj = ELBListeners(elb_obj.connection, elb_obj.module, elb_obj.elb['LoadBalancerArn']) + # Exit on check_mode + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=True, msg='Would have deleted ALB if not in check mode.') + + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) for listener_to_delete in [i['ListenerArn'] for i in listeners_obj.current_listeners]: - listener_obj = ELBListener(elb_obj.connection, elb_obj.module, listener_to_delete, elb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) listener_obj.delete() - elb_obj.delete() + alb_obj.delete() - elb_obj.module.exit_json(changed=elb_obj.changed) + else: + + # Exit on check_mode - no changes + if alb_obj.module.check_mode: + alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - ALB already absent.') + + alb_obj.module.exit_json(changed=alb_obj.changed) def main(): @@ -648,7 +715,8 @@ def main(): ], required_together=[ ['access_logs_enabled', 'access_logs_s3_bucket'] - ] + ], + supports_check_mode=True, ) # Quick check of listeners parameters @@ -668,12 +736,12 @@ def main(): state = module.params.get("state") - elb = ApplicationLoadBalancer(connection, connection_ec2, module) + alb = ApplicationLoadBalancer(connection, connection_ec2, module) if state == 'present': - create_or_update_elb(elb) - else: - delete_elb(elb) + create_or_update_alb(alb) + elif state == 'absent': + delete_alb(alb) if __name__ == '__main__': diff --git a/plugins/modules/elb_application_lb_info.py b/plugins/modules/elb_application_lb_info.py index ddac4fe9629..d1de312df11 100644 --- a/plugins/modules/elb_application_lb_info.py +++ b/plugins/modules/elb_application_lb_info.py @@ -10,9 +10,9 @@ --- module: elb_application_lb_info version_added: 1.0.0 -short_description: Gather information about application ELBs in AWS +short_description: Gather information about Application Load Balancers in AWS description: - - Gather information about application ELBs in AWS + - Gather information about Application Load Balancers in AWS author: Rob White (@wimnat) options: load_balancer_arns: @@ -37,19 +37,19 @@ EXAMPLES = r''' # Note: These examples do not set authentication details, see the AWS Guide for details. 
-- name: Gather information about all target groups +- name: Gather information about all ALBs community.aws.elb_application_lb_info: -- name: Gather information about the target group attached to a particular ELB +- name: Gather information about a particular ALB given its ARN community.aws.elb_application_lb_info: load_balancer_arns: - - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff" + - "arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-alb/aabbccddeeff" -- name: Gather information about a target groups named 'tg1' and 'tg2' +- name: Gather information about ALBs named 'alb1' and 'alb2' community.aws.elb_application_lb_info: names: - - elb1 - - elb2 + - alb1 + - alb2 - name: Gather information about specific ALB community.aws.elb_application_lb_info: @@ -69,55 +69,119 @@ access_logs_s3_bucket: description: The name of the S3 bucket for the access logs. type: str - sample: mys3bucket + sample: "mys3bucket" access_logs_s3_enabled: description: Indicates whether access logs stored in Amazon S3 are enabled. - type: str + type: bool sample: true access_logs_s3_prefix: description: The prefix for the location in the S3 bucket. type: str - sample: /my/logs + sample: "my/logs" availability_zones: description: The Availability Zones for the load balancer. type: list - sample: "[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]" + sample: [{ "load_balancer_addresses": [], "subnet_id": "subnet-aabbccddff", "zone_name": "ap-southeast-2a" }] canonical_hosted_zone_id: description: The ID of the Amazon Route 53 hosted zone associated with the load balancer. type: str - sample: ABCDEF12345678 + sample: "ABCDEF12345678" created_time: description: The date and time the load balancer was created. type: str sample: "2015-02-12T02:14:02+00:00" deletion_protection_enabled: description: Indicates whether deletion protection is enabled. - type: str + type: bool sample: true dns_name: description: The public DNS name of the load balancer. type: str - sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com + sample: "internal-my-alb-123456789.ap-southeast-2.elb.amazonaws.com" idle_timeout_timeout_seconds: description: The idle timeout value, in seconds. - type: str + type: int sample: 60 ip_address_type: - description: The type of IP addresses used by the subnets for the load balancer. + description: The type of IP addresses used by the subnets for the load balancer. type: str - sample: ipv4 + sample: "ipv4" + listeners: + description: Information about the listeners. + type: complex + contains: + listener_arn: + description: The Amazon Resource Name (ARN) of the listener. + type: str + sample: "" + load_balancer_arn: + description: The Amazon Resource Name (ARN) of the load balancer. + type: str + sample: "" + port: + description: The port on which the load balancer is listening. + type: int + sample: 80 + protocol: + description: The protocol for connections from clients to the load balancer. + type: str + sample: "HTTPS" + certificates: + description: The SSL server certificate. + type: complex + contains: + certificate_arn: + description: The Amazon Resource Name (ARN) of the certificate. + type: str + sample: "" + ssl_policy: + description: The security policy that defines which ciphers and protocols are supported. + type: str + sample: "" + default_actions: + description: The default actions for the listener. + type: str + contains: + type: + description: The type of action. 
+ type: str + sample: "" + target_group_arn: + description: The Amazon Resource Name (ARN) of the target group. + type: str + sample: "" load_balancer_arn: description: The Amazon Resource Name (ARN) of the load balancer. type: str - sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455 + sample: "arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-alb/001122334455" load_balancer_name: description: The name of the load balancer. type: str - sample: my-elb + sample: "my-alb" + routing_http2_enabled: + description: Indicates whether HTTP/2 is enabled. + type: bool + sample: true + routing_http_desync_mitigation_mode: + description: Determines how the load balancer handles requests that might pose a security risk to an application. + type: str + sample: "defensive" + routing_http_drop_invalid_header_fields_enabled: + description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). + type: bool + sample: false + routing_http_x_amzn_tls_version_and_cipher_suite_enabled: + description: Indicates whether the two headers are added to the client request before sending it to the target. + type: bool + sample: false + routing_http_xff_client_port_enabled: + description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. + type: bool + sample: false scheme: description: Internet-facing or internal load balancer. type: str - sample: internal + sample: "internal" security_groups: description: The IDs of the security groups for the load balancer. type: list @@ -125,21 +189,26 @@ state: description: The state of the load balancer. type: dict - sample: "{'code': 'active'}" + sample: {'code': 'active'} tags: description: The tags attached to the load balancer. type: dict - sample: "{ + sample: { 'Tag': 'Example' - }" + } type: description: The type of load balancer. type: str - sample: application + sample: "application" vpc_id: description: The ID of the VPC for the load balancer. type: str - sample: vpc-0011223344 + sample: "vpc-0011223344" + waf_fail_open_enabled: + description: Indicates whether to allow a AWS WAF-enabled load balancer to route requests to targets + if it is unable to forward the request to AWS WAF. 
+ type: bool + sample: false ''' try: @@ -154,12 +223,12 @@ from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -def get_elb_listeners(connection, module, elb_arn): +def get_alb_listeners(connection, module, alb_arn): try: - return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners'] + return connection.describe_listeners(LoadBalancerArn=alb_arn)['Listeners'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe elb listeners") + module.fail_json_aws(e, msg="Failed to describe alb listeners") def get_listener_rules(connection, module, listener_arn): @@ -218,17 +287,17 @@ def list_load_balancers(connection, module): module.fail_json_aws(e, msg="Failed to list load balancers") for load_balancer in load_balancers['LoadBalancers']: - # Get the attributes for each elb + # Get the attributes for each alb load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn'])) - # Get the listeners for each elb - load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn']) + # Get the listeners for each alb + load_balancer['listeners'] = get_alb_listeners(connection, module, load_balancer['LoadBalancerArn']) # For each listener, get listener rules for listener in load_balancer['listeners']: listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) - # Get ELB ip address type + # Get ALB ip address type load_balancer['IpAddressType'] = get_load_balancer_ipaddresstype(connection, module, load_balancer['LoadBalancerArn']) # Turn the boto3 result in to ansible_friendly_snaked_names diff --git a/tests/integration/targets/elb_application_lb/aliases b/tests/integration/targets/elb_application_lb/aliases index 500826a1d4f..948352f2013 100644 --- a/tests/integration/targets/elb_application_lb/aliases +++ b/tests/integration/targets/elb_application_lb/aliases @@ -1,2 +1,3 @@ cloud/aws slow +elb_application_lb_info \ No newline at end of file diff --git a/tests/integration/targets/elb_application_lb/defaults/main.yml b/tests/integration/targets/elb_application_lb/defaults/main.yml index d0c601c6a04..20ced9d88ae 100644 --- a/tests/integration/targets/elb_application_lb/defaults/main.yml +++ b/tests/integration/targets/elb_application_lb/defaults/main.yml @@ -1,4 +1,14 @@ --- +# defaults file for elb_application_lb + resource_short: "{{ '%0.8x'%((16**8) | random(seed=resource_prefix)) }}" alb_name: "alb-test-{{ resource_short }}" tg_name: "alb-test-{{ resource_short }}" + +vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' + +private_subnet_cidr_1: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' +private_subnet_cidr_2: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24' + +public_subnet_cidr_1: '10.{{ 256 | random(seed=resource_prefix) }}.3.0/24' +public_subnet_cidr_2: '10.{{ 256 | random(seed=resource_prefix) }}.4.0/24' \ No newline at end of file diff --git a/tests/integration/targets/elb_application_lb/tasks/full_test.yml b/tests/integration/targets/elb_application_lb/tasks/full_test.yml deleted file mode 100644 index e260d0f7f5c..00000000000 --- a/tests/integration/targets/elb_application_lb/tasks/full_test.yml +++ /dev/null @@ -1,186 +0,0 @@ -- name: elb_application_lb full_test - block: - # Setup - - name: create VPC - ec2_vpc_net: - cidr_block: 10.228.228.0/22 - name: '{{ resource_prefix }}_vpc' - state: present - ipv6_cidr: true - register: vpc - - 
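A short, hypothetical read-back sketch for the expanded elb_application_lb_info return structure documented earlier in this patch; the ALB name and task names are placeholders, and the fields referenced (ip_address_type, routing_http2_enabled, listeners[].port) are the snake_cased keys described in the RETURN block above:

    - name: Describe a single ALB by name
      community.aws.elb_application_lb_info:
        names:
          - my-alb
      register: alb_info

    - name: Inspect a few of the documented fields
      ansible.builtin.debug:
        msg:
          - "ip_address_type: {{ alb_info.load_balancers[0].ip_address_type }}"
          - "http2 enabled: {{ alb_info.load_balancers[0].routing_http2_enabled }}"
          - "first listener port: {{ alb_info.load_balancers[0].listeners[0].port }}"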
name: create internet gateway - ec2_vpc_igw: - vpc_id: '{{ vpc.vpc.id }}' - state: present - tags: - Name: '{{ resource_prefix }}' - register: igw - - name: create private subnet - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - az: '{{ aws_region}}{{ item.az }}' - vpc_id: '{{ vpc.vpc.id }}' - state: present - tags: - Public: '{{ item.public|string }}' - Name: '{{ item.public|ternary(''public'', ''private'') }}-{{ item.az }}' - with_items: - - cidr: 10.228.230.0/24 - az: a - public: 'False' - - cidr: 10.228.231.0/24 - az: b - public: 'False' - - - name: create public subnets with ipv6 - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - az: '{{ aws_region}}{{ item.az }}' - vpc_id: '{{ vpc.vpc.id }}' - state: present - ipv6_cidr: '{{ item.vpc_ipv6_cidr }}' - tags: - Public: '{{ item.public|string }}' - Name: '{{ item.public|ternary(''public'', ''private'') }}-{{ item.az }}' - with_items: - - cidr: 10.228.228.0/24 - az: a - public: 'True' - vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | replace('0::/56','0::/64') }}" - - cidr: 10.228.229.0/24 - az: b - public: 'True' - vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | replace('0::/56','1::/64') }}" - - - ec2_vpc_subnet_info: - filters: - vpc-id: '{{ vpc.vpc.id }}' - register: vpc_subnets - - name: create list of subnet ids - set_fact: - alb_subnets: '{{ vpc_subnets|community.general.json_query(''subnets[?tags.Public == `True`].id'') }}' - private_subnets: '{{ vpc_subnets|community.general.json_query(''subnets[?tags.Public != `True`].id'') }}' - - name: create a route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Name: igw-route - Created: '{{ resource_prefix }}' - subnets: '{{ alb_subnets + private_subnets }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: '{{ igw.gateway_id }}' - register: route_table - - ec2_group: - name: '{{ resource_prefix }}' - description: security group for Ansible ALB integration tests - state: present - vpc_id: '{{ vpc.vpc.id }}' - rules: - - proto: tcp - from_port: 1 - to_port: 65535 - cidr_ip: 0.0.0.0/0 - register: sec_group - - name: create a target group for testing - elb_target_group: - name: '{{ tg_name }}' - protocol: http - port: 80 - vpc_id: '{{ vpc.vpc.id }}' - state: present - register: tg - - # Run main tests - - include_tasks: test_alb_bad_listener_options.yml - - include_tasks: test_alb_ip_address_type_options.yml - - include_tasks: test_alb_tags.yml - - include_tasks: test_creating_alb.yml - - include_tasks: test_alb_with_asg.yml - - include_tasks: test_modifying_alb_listeners.yml - - include_tasks: test_deleting_alb.yml - - include_tasks: test_multiple_actions.yml - - always: - # Cleanup - - name: destroy ALB - elb_application_lb: - name: '{{ alb_name }}' - state: absent - wait: true - wait_timeout: 600 - ignore_errors: true - - - name: destroy target group if it was created - elb_target_group: - name: '{{ tg_name }}' - protocol: http - port: 80 - vpc_id: '{{ vpc.vpc.id }}' - state: absent - wait: true - wait_timeout: 600 - register: remove_tg - retries: 5 - delay: 3 - until: remove_tg is success - when: tg is defined - ignore_errors: true - - name: destroy sec group - ec2_group: - name: '{{ sec_group.group_name }}' - description: security group for Ansible ALB integration tests - state: absent - vpc_id: '{{ vpc.vpc.id }}' - register: remove_sg - retries: 10 - delay: 5 - until: remove_sg is success - ignore_errors: true - - name: remove route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - route_table_id: '{{ 
route_table.route_table.route_table_id }}' - lookup: id - state: absent - register: remove_rt - retries: 10 - delay: 5 - until: remove_rt is success - ignore_errors: true - - name: destroy subnets - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - vpc_id: '{{ vpc.vpc.id }}' - state: absent - register: remove_subnet - retries: 10 - delay: 5 - until: remove_subnet is success - with_items: - - cidr: 10.228.228.0/24 - - cidr: 10.228.229.0/24 - - cidr: 10.228.230.0/24 - - cidr: 10.228.231.0/24 - ignore_errors: true - - name: destroy internet gateway - ec2_vpc_igw: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Name: '{{ resource_prefix }}' - state: absent - register: remove_igw - retries: 10 - delay: 5 - until: remove_igw is success - ignore_errors: true - - name: destroy VPC - ec2_vpc_net: - cidr_block: 10.228.228.0/22 - name: '{{ resource_prefix }}_vpc' - state: absent - register: remove_vpc - retries: 10 - delay: 5 - until: remove_vpc is success - ignore_errors: true diff --git a/tests/integration/targets/elb_application_lb/tasks/main.yml b/tests/integration/targets/elb_application_lb/tasks/main.yml index 90914288d88..169ef5b16dd 100644 --- a/tests/integration/targets/elb_application_lb/tasks/main.yml +++ b/tests/integration/targets/elb_application_lb/tasks/main.yml @@ -1,12 +1,1172 @@ - name: 'elb_application_lb integration tests' collections: - amazon.aws + module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Create a test VPC + ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + name: '{{ resource_prefix }}_vpc' + state: present + ipv6_cidr: true + tags: + Name: elb_application_lb testing + ResourcePrefix: "{{ resource_prefix }}" + register: vpc + + - name: 'Set fact: VPC ID' + set_fact: + vpc_id: "{{ vpc.vpc.id }}" + + - name: Create an internet gateway + ec2_vpc_igw: + vpc_id: '{{ vpc_id }}' + state: present + tags: + Name: '{{ resource_prefix }}' + register: igw + + - name: Create private subnets + ec2_vpc_subnet: + cidr: '{{ item.cidr }}' + az: '{{ aws_region }}{{ item.az }}' + vpc_id: '{{ vpc_id }}' + state: present + tags: + Public: 'False' + Name: 'private-{{ item.az }}' + with_items: + - cidr: "{{ private_subnet_cidr_1 }}" + az: a + - cidr: "{{ private_subnet_cidr_2 }}" + az: b + register: private_subnets + + - name: Create public subnets with ipv6 + ec2_vpc_subnet: + cidr: '{{ item.cidr }}' + az: '{{ aws_region }}{{ item.az }}' + vpc_id: '{{ vpc_id }}' + state: present + ipv6_cidr: '{{ item.vpc_ipv6_cidr }}' + tags: + Public: 'True' + Name: 'public-{{ item.az }}' + with_items: + - cidr: "{{ public_subnet_cidr_1 }}" + az: a + vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | replace('0::/56','0::/64') }}" + - cidr: "{{ public_subnet_cidr_2 }}" + az: b + vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | replace('0::/56','1::/64') }}" + register: public_subnets + + - name: Create list of subnet ids + set_fact: + public_subnets: "{{ public_subnets.results | map(attribute='subnet') | map(attribute='id') }}" + private_subnets: "{{ private_subnets.results | map(attribute='subnet') | map(attribute='id') }}" + + - name: Create a route table + ec2_vpc_route_table: + vpc_id: '{{ vpc_id }}' + tags: + Name: igw-route + Created: 
'{{ resource_prefix }}' + subnets: '{{ public_subnets + private_subnets }}' + routes: + - dest: 0.0.0.0/0 + gateway_id: '{{ igw.gateway_id }}' + register: route_table + + - name: Create a security group for Ansible ALB integration tests + ec2_group: + name: '{{ resource_prefix }}' + description: security group for Ansible ALB integration tests + state: present + vpc_id: '{{ vpc_id }}' + rules: + - proto: tcp + from_port: 1 + to_port: 65535 + cidr_ip: 0.0.0.0/0 + register: sec_group + + - name: Create another security group for Ansible ALB integration tests + ec2_group: + name: '{{ resource_prefix }}-2' + description: security group for Ansible ALB integration tests + state: present + vpc_id: '{{ vpc_id }}' + rules: + - proto: tcp + from_port: 1 + to_port: 65535 + cidr_ip: 0.0.0.0/0 + register: sec_group2 + + - name: Create a target group for testing + elb_target_group: + name: '{{ tg_name }}' + protocol: http + port: 80 + vpc_id: '{{ vpc_id }}' + state: present + register: tg + + # ---------------- elb_application_lb tests --------------------------------------------------- + + - name: Create an ALB (invalid - SslPolicy is required when Protocol == HTTPS) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTPS + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ignore_errors: yes + register: alb + + - assert: + that: + - alb is failed + - alb.msg is match("'SslPolicy' is a required listener dict key when Protocol = HTTPS") + + - name: Create an ALB (invalid - didn't provide required listener options) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Port: 80 + ignore_errors: yes + register: alb + + - assert: + that: + - alb is failed + - alb.msg is match("missing required arguments:\ DefaultActions, Protocol found in listeners") + + - name: Create an ALB (invalid - invalid listener option type) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: "bad type" + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ignore_errors: yes + register: alb + + - assert: + that: + - alb is failed + - "'unable to convert to int' in alb.msg" + + - name: Create an ALB (invalid - invalid ip address type) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: "ip_addr_v4_v6" + ignore_errors: yes + register: alb + + - assert: + that: + - alb is failed + + # ------------------------------------------------------------------------------------------ + + - name: Create an ALB with ip address type - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: 'dualstack' + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have created ALB if not in check mode.') + + - name: Create an ALB with ip address type + 
elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: 'dualstack' + register: alb + + - assert: + that: + - alb is changed + - alb.ip_address_type == 'dualstack' + - alb.listeners[0].rules | length == 1 + + - name: Create an ALB with ip address type (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: 'dualstack' + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Create an ALB with ip address type (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: 'dualstack' + register: alb + + - assert: + that: + - alb is not changed + - alb.ip_address_type == 'dualstack' + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with different ip address type - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: 'ipv4' + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different ip address type + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: 'ipv4' + register: alb + + - assert: + that: + - alb is changed + - alb.ip_address_type == 'ipv4' + + - name: Update an ALB with different ip address type (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: 'ipv4' + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different ip address type (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: 'ipv4' + register: alb + + - assert: + that: + - alb is not changed + - alb.ip_address_type == 'ipv4' + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with different listener by adding 
rule - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '1' + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different listener by adding rule + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '1' + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + + - assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 2 + - "'1' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + - name: Update an ALB with different listener by adding rule (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '1' + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different listener by adding rule (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '1' + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + + - assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 2 + - "'1' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with different listener by modifying rule - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '2' + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different listener by modifying rule + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + 
TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '2' + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + + - assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 2 + - "'2' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + - name: Update an ALB with different listener by modifying rule (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '2' + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different listener by modifying rule (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - '/test' + Priority: '2' + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + + - assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 2 + - "'2' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB with different listener by deleting rule - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: [] + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different listener by deleting rule + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: [] + register: alb + + - assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 1 + - "'2' not in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + - name: Update an ALB with different listener by deleting rule (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: [] + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different listener by deleting rule (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + 
Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: [] + register: alb + + - assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 1 + - "'2' not in {{ alb.listeners[0].rules | map(attribute='priority') }}" + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by deleting listener - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: [] + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by deleting listener + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: [] + register: alb + + - assert: + that: + - alb is changed + - not alb.listeners + + - name: Update an ALB by deleting listener (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: [] + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by deleting listener (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: [] + register: alb + + - assert: + that: + - alb is not changed + - not alb.listeners + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by adding tags - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: "ALB test {{ resource_prefix }}" + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by adding tags + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: "ALB test {{ resource_prefix }}" + register: alb + + - assert: + that: + - alb is changed + - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}' + + - name: Update an ALB by adding tags (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: "ALB test {{ resource_prefix }}" + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by adding tags (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: "ALB test {{ resource_prefix }}" + register: alb + + - assert: + that: + - alb is not changed + - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}' + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by modifying tags - check_mode + elb_application_lb: + 
name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: "ALB test {{ resource_prefix }}-2" + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by modifying tags + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: "ALB test {{ resource_prefix }}-2" + register: alb + + - assert: + that: + - alb is changed + - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}-2"}' + + - name: Update an ALB by modifying tags (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: "ALB test {{ resource_prefix }}-2" + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by modifying tags (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: "ALB test {{ resource_prefix }}-2" + register: alb + + - assert: + that: + - alb is not changed + - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}-2"}' + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by removing tags - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: {} + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by removing tags + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: {} + register: alb + + - assert: + that: + - alb is changed + - not alb.tags + + - name: Update an ALB by removing tags (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: {} + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by removing tags (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: {} + register: alb + + - assert: + that: + - alb is not changed + - not alb.tags + + # ------------------------------------------------------------------------------------------ + + - name: Update an ALB by changing security group - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group2.group_id }}" + state: present + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by changing security group + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group2.group_id }}" + state: present 
+ register: alb + + - assert: + that: + - alb is changed + - alb.security_groups[0] == sec_group2.group_id + + - name: Update an ALB by changing security group (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group2.group_id }}" + state: present + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by changing security group (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group2.group_id }}" + state: present + register: alb + + - assert: + that: + - alb is not changed + - alb.security_groups[0] == sec_group2.group_id + + # ------------------------------------------------------------------------------------------ + + - name: Ensure elb_application_lb_info supports check_mode + elb_application_lb_info: + register: alb_info + check_mode: yes + + - assert: + that: + - alb_info.load_balancers | length > 0 + + - name: Get ALB application info using no args + elb_application_lb_info: + register: alb_info + + - assert: + that: + - alb_info.load_balancers | length > 0 + + - name: Get ALB application info using load balancer arn + elb_application_lb_info: + load_balancer_arns: + - "{{ alb.load_balancer_arn }}" + register: alb_info + + - assert: + that: + - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id + + - name: Get ALB application info using load balancer name + elb_application_lb_info: + names: + - "{{ alb.load_balancer_name }}" + register: alb_info + + - assert: + that: + - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id + + # ------------------------------------------------------------------------------------------ + + - name: Delete an ALB - check_mode + elb_application_lb: + name: "{{ alb_name }}" + state: absent + register: alb + check_mode: yes + + - assert: + that: + - alb is changed + - alb.msg is match('Would have deleted ALB if not in check mode.') + + - name: Delete an ALB + elb_application_lb: + name: "{{ alb_name }}" + state: absent + register: alb + + - assert: + that: + - alb is changed + + - name: Delete an ALB (idempotence) - check_mode + elb_application_lb: + name: "{{ alb_name }}" + state: absent + register: alb + check_mode: yes + + - assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - ALB already absent.') + + - name: Delete an ALB (idempotence) + elb_application_lb: + name: "{{ alb_name }}" + state: absent + register: alb + + - assert: + that: + - alb is not changed + + # ----- Cleanup ------------------------------------------------------------------------------ + + always: + - name: Destroy ALB + elb_application_lb: + name: '{{ alb_name }}' + state: absent + wait: true + wait_timeout: 600 + ignore_errors: true + + - name: Destroy target group if it was created + elb_target_group: + name: '{{ tg_name }}' + protocol: http + port: 80 + vpc_id: '{{ vpc_id }}' + state: absent + wait: true + wait_timeout: 600 + register: remove_tg + retries: 5 + delay: 3 + until: remove_tg is success + when: tg is defined + ignore_errors: true + + - name: Destroy sec groups + ec2_group: + name: "{{ item }}" + description: security group for Ansible ALB integration tests + state: absent + vpc_id: '{{ vpc_id }}' + register: remove_sg + retries: 10 + delay: 5 + until: remove_sg is success + ignore_errors: true + with_items: + - "{{ resource_prefix 
}}" + - "{{ resource_prefix }}-2" + + - name: Destroy route table + ec2_vpc_route_table: + vpc_id: '{{ vpc_id }}' + route_table_id: '{{ route_table.route_table.route_table_id }}' + lookup: id + state: absent + register: remove_rt + retries: 10 + delay: 5 + until: remove_rt is success + ignore_errors: true + + - name: Destroy subnets + ec2_vpc_subnet: + cidr: "{{ item }}" + vpc_id: "{{ vpc_id }}" + state: absent + register: remove_subnet + retries: 10 + delay: 5 + until: remove_subnet is success + with_items: + - "{{ private_subnet_cidr_1 }}" + - "{{ private_subnet_cidr_2 }}" + - "{{ public_subnet_cidr_1 }}" + - "{{ public_subnet_cidr_2 }}" + ignore_errors: true + + - name: Destroy internet gateway + ec2_vpc_igw: + vpc_id: '{{ vpc_id }}' + tags: + Name: '{{ resource_prefix }}' + state: absent + register: remove_igw + retries: 10 + delay: 5 + until: remove_igw is success + ignore_errors: true - - include_tasks: full_test.yml + - name: Destroy VPC + ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + name: "{{ resource_prefix }}_vpc" + state: absent + register: remove_vpc + retries: 10 + delay: 5 + until: remove_vpc is success + ignore_errors: true diff --git a/tests/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml b/tests/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml deleted file mode 100644 index a811e3f3054..00000000000 --- a/tests/integration/targets/elb_application_lb/tasks/test_alb_bad_listener_options.yml +++ /dev/null @@ -1,68 +0,0 @@ -- block: - - - name: test creating an ALB with invalid listener options - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTPS - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - ignore_errors: yes - register: alb - - - assert: - that: - - alb is failed - - - name: test creating an ALB without providing required listener options - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Port: 80 - ignore_errors: yes - register: alb - - - assert: - that: - - alb is failed - - '"missing required arguments" in alb.msg' - - '"Protocol" in alb.msg' - - '"DefaultActions" in alb.msg' - - - name: test creating an ALB providing an invalid listener option type - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: "bad type" - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - ignore_errors: yes - register: alb - - - assert: - that: - - alb is failed - - "'unable to convert to int' in alb.msg" - - always: - # Cleanup - - name: destroy ALB if created - elb_application_lb: - name: '{{ alb_name }}' - state: absent - wait: true - wait_timeout: 600 - ignore_errors: true diff --git a/tests/integration/targets/elb_application_lb/tasks/test_alb_ip_address_type_options.yml b/tests/integration/targets/elb_application_lb/tasks/test_alb_ip_address_type_options.yml deleted file mode 100644 index 9249d1161c0..00000000000 --- a/tests/integration/targets/elb_application_lb/tasks/test_alb_ip_address_type_options.yml +++ /dev/null @@ -1,93 +0,0 @@ -- block: - - name: set elb name for ipv6 - set_fact: - elb_name_ipv6: "{{ alb_name ~ 'ipv6' }}" - - - name: test creating an ELB with invalid ip address type - elb_application_lb: - name: "{{ 
elb_name_ipv6 }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - ip_address_type: "ip_addr_v4_v6" - ignore_errors: yes - register: elb - - - assert: - that: - - elb is failed - - - name: test creating an ELB with dualstack ip adress type - elb_application_lb: - name: "{{ elb_name_ipv6 }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - ip_address_type: "dualstack" - register: elb - - - assert: - that: - - elb.ip_address_type == "dualstack" - - - name: test updating an ELB with ipv4 adress type - elb_application_lb: - name: "{{ elb_name_ipv6 }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - ip_address_type: "ipv4" - register: elb - - - assert: - that: - - elb.changed - - elb.ip_address_type == "ipv4" - - - name: test idempotence updating an ELB with ipv4 adress type - elb_application_lb: - name: "{{ elb_name_ipv6 }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - ip_address_type: "ipv4" - register: elb - - - assert: - that: - - not elb.changed - - elb.ip_address_type == "ipv4" - - always: - # Cleanup - - name: destroy ALB if created - elb_application_lb: - name: '{{ elb_name_ipv6 }}' - state: absent - wait: true - wait_timeout: 600 - ignore_errors: true diff --git a/tests/integration/targets/elb_application_lb/tasks/test_alb_tags.yml b/tests/integration/targets/elb_application_lb/tasks/test_alb_tags.yml deleted file mode 100644 index 06b6d0249cc..00000000000 --- a/tests/integration/targets/elb_application_lb/tasks/test_alb_tags.yml +++ /dev/null @@ -1,78 +0,0 @@ -- block: - - - name: create ALB with no listeners - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - register: alb - - - assert: - that: - - alb.changed - - - name: re-create ALB with no listeners - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - register: alb - - - assert: - that: - - not alb.changed - - - name: add tags to ALB - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - tags: - created_by: "ALB test {{ resource_prefix }}" - register: alb - - - assert: - that: - - alb.changed - - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}' - - - name: remove tags from ALB - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - tags: {} - register: alb - - - assert: - that: - - alb.changed - - not alb.tags - - - name: test idempotence - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - tags: {} - register: alb - - - assert: - that: - - not alb.changed - - not alb.tags - - - name: destroy ALB with no listeners - elb_application_lb: - 
name: "{{ alb_name }}" - state: absent - register: alb - - - assert: - that: - - alb.changed diff --git a/tests/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml b/tests/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml deleted file mode 100644 index b066d88a210..00000000000 --- a/tests/integration/targets/elb_application_lb/tasks/test_alb_with_asg.yml +++ /dev/null @@ -1,73 +0,0 @@ -- block: - - - ec2_ami_info: - filters: - architecture: x86_64 - virtualization-type: hvm - root-device-type: ebs - name: "amzn-ami-hvm*" - owner-alias: "amazon" - register: amis - - - set_fact: - latest_amazon_linux: "{{ amis.images | sort(attribute='creation_date') | last }}" - - - ec2_asg: - state: absent - name: "{{ resource_prefix }}-webservers" - wait_timeout: 900 - - - ec2_lc: - name: "{{ resource_prefix }}-web-lcfg" - state: absent - - - name: Create launch config for testing - ec2_lc: - name: "{{ resource_prefix }}-web-lcfg" - assign_public_ip: true - image_id: "{{ latest_amazon_linux.image_id }}" - security_groups: "{{ sec_group.group_id }}" - instance_type: t2.medium - user_data: | - #!/bin/bash - set -x - yum update -y --nogpgcheck - yum install -y --nogpgcheck httpd - echo "Hello Ansiblings!" >> /var/www/html/index.html - service httpd start - volumes: - - device_name: /dev/xvda - volume_size: 10 - volume_type: gp2 - delete_on_termination: true - - - name: Create autoscaling group for app server fleet - ec2_asg: - name: "{{ resource_prefix }}-webservers" - vpc_zone_identifier: "{{ alb_subnets }}" - launch_config_name: "{{ resource_prefix }}-web-lcfg" - termination_policies: - - OldestLaunchConfiguration - - Default - health_check_period: 600 - health_check_type: EC2 - replace_all_instances: true - min_size: 0 - max_size: 2 - desired_capacity: 1 - wait_for_instances: true - target_group_arns: - - "{{ tg.target_group_arn }}" - - always: - - - ec2_asg: - state: absent - name: "{{ resource_prefix }}-webservers" - wait_timeout: 900 - ignore_errors: yes - - - ec2_lc: - name: "{{ resource_prefix }}-web-lcfg" - state: absent - ignore_errors: yes diff --git a/tests/integration/targets/elb_application_lb/tasks/test_creating_alb.yml b/tests/integration/targets/elb_application_lb/tasks/test_creating_alb.yml deleted file mode 100644 index f5e75ab3872..00000000000 --- a/tests/integration/targets/elb_application_lb/tasks/test_creating_alb.yml +++ /dev/null @@ -1,41 +0,0 @@ -- block: - - - name: create ALB with a listener - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - register: alb - - - assert: - that: - - alb.changed - - alb.listeners|length == 1 - - alb.listeners[0].rules|length == 1 - - - name: test idempotence creating ALB with a listener - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - register: alb - - - assert: - that: - - not alb.changed - - alb.listeners|length == 1 - - alb.listeners[0].rules|length == 1 diff --git a/tests/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml b/tests/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml deleted file mode 100644 index cf1335d6dbd..00000000000 --- 
a/tests/integration/targets/elb_application_lb/tasks/test_deleting_alb.yml +++ /dev/null @@ -1,37 +0,0 @@ -- block: - - - name: destroy ALB with listener - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: absent - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - wait: yes - wait_timeout: 300 - register: alb - - - name: test idempotence - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: absent - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - wait: yes - wait_timeout: 300 - register: alb - - - assert: - that: - - not alb.changed diff --git a/tests/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml b/tests/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml deleted file mode 100644 index 3cc8a857bca..00000000000 --- a/tests/integration/targets/elb_application_lb/tasks/test_modifying_alb_listeners.yml +++ /dev/null @@ -1,222 +0,0 @@ -- block: - - - name: add a rule to the listener - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - Rules: - - Conditions: - - Field: path-pattern - Values: - - '/test' - Priority: '1' - Actions: - - TargetGroupName: "{{ tg_name }}" - Type: forward - register: alb - - - assert: - that: - - alb.changed - - alb.listeners[0].rules|length == 2 - - - name: test replacing the rule with one with the same priority - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - purge_listeners: true - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - Rules: - - Conditions: - - Field: path-pattern - Values: - - '/new' - Priority: '1' - Actions: - - TargetGroupName: "{{ tg_name }}" - Type: forward - register: alb - - - assert: - that: - - alb.changed - - alb.listeners[0].rules|length == 2 - - - name: test the rule will not be removed without purge_listeners - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - register: alb - - - assert: - that: - - not alb.changed - - alb.listeners[0].rules|length == 2 - - - name: test a rule can be added and other rules will not be removed when purge_rules is no. 
- elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - purge_rules: no - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - Rules: - - Conditions: - - Field: path-pattern - Values: - - '/new' - Priority: '2' - Actions: - - TargetGroupName: "{{ tg_name }}" - Type: forward - register: alb - - - assert: - that: - - alb.changed - - alb.listeners[0].rules|length == 3 - - - name: add a rule that uses the host header condition to the listener - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - purge_rules: no - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - Rules: - - Conditions: - - Field: host-header - Values: - - 'local.mydomain.com' - Priority: '3' - Actions: - - TargetGroupName: "{{ tg_name }}" - Type: forward - register: alb - - - assert: - that: - - alb.changed - - alb.listeners[0].rules|length == 4 - # - '{{ alb|community.general.json_query("listeners[].rules[].conditions[].host_header_config.values[]")|length == 1 }}' - - - name: test replacing the rule that uses the host header condition with multiple host header conditions - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - purge_rules: no - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - Rules: - - Conditions: - - Field: host-header - Values: - - 'local.mydomain.com' - - 'alternate.mydomain.com' - Priority: '3' - Actions: - - TargetGroupName: "{{ tg_name }}" - Type: forward - register: alb - - - assert: - that: - - alb.changed - - alb.listeners[0].rules|length == 4 - #- '{{ alb|community.general.json_query("listeners[].rules[].conditions[].host_header_config.values[]")|length == 2 }}' - - - name: remove the rule - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - purge_listeners: true - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - Rules: [] - register: alb - - - assert: - that: - - alb.changed - - alb.listeners[0].rules|length == 1 - - - name: remove listener from ALB - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: [] - register: alb - - - assert: - that: - - alb.changed - - not alb.listeners - - - name: add the listener to the ALB - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - register: alb - - - assert: - that: - - alb.changed - - alb.listeners|length == 1 - - alb.availability_zones|length == 2 diff --git a/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml b/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml deleted file mode 100644 index da56a98716b..00000000000 --- a/tests/integration/targets/elb_application_lb/tasks/test_multiple_actions.yml +++ /dev/null @@ -1,447 +0,0 @@ -- block: - - - name: register dummy OIDC config - set_fact: - 
AuthenticateOidcActionConfig: - AuthorizationEndpoint: "https://www.example.com/auth" - ClientId: "eeeeeeeeeeeeeeeeeeeeeeeeee" - ClientSecret: "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" - Issuer: "https://www.example.com/issuer" - OnUnauthenticatedRequest: "authenticate" - Scope: "openid" - SessionCookieName: "AWSELBAuthSessionCookie" - SessionTimeout: 604800 - TokenEndpoint: "https://www.example.com/token" - UserInfoEndpoint: "https://www.example.com/userinfo" - UseExistingClientSecret: true - - - name: register fixed response action - set_fact: - FixedResponseActionConfig: - ContentType: "text/plain" - MessageBody: "This is the page you're looking for" - StatusCode: "200" - - - name: register redirect action - set_fact: - RedirectActionConfig: - Host: "#{host}" - Path: "/example/redir" # or /#{path} - Port: "#{port}" - Protocol: "#{protocol}" - Query: "#{query}" - StatusCode: "HTTP_302" # or HTTP_301 - - - name: delete existing ALB to avoid target group association issues - elb_application_lb: - name: "{{ alb_name }}" - state: absent - wait: yes - wait_timeout: 600 - - - name: cleanup tg to avoid target group association issues - elb_target_group: - name: "{{ tg_name }}" - protocol: http - port: 80 - vpc_id: "{{ vpc.vpc.id }}" - state: absent - wait: yes - wait_timeout: 600 - register: cleanup_tg - retries: 5 - delay: 3 - until: cleanup_tg is success - - - name: recreate a target group - elb_target_group: - name: "{{ tg_name }}" - protocol: http - port: 80 - vpc_id: "{{ vpc.vpc.id }}" - state: present - register: tg - - - name: create ALB with redirect DefaultAction - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: redirect - RedirectConfig: "{{ RedirectActionConfig }}" - register: alb - - - assert: - that: - - alb.changed - - alb.listeners|length == 1 - - alb.listeners[0].rules[0].actions|length == 1 - - alb.listeners[0].rules[0].actions[0].type == "redirect" - - - name: test idempotence with redirect DefaultAction - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: redirect - RedirectConfig: "{{ RedirectActionConfig }}" - register: alb - - - assert: - that: - - not alb.changed - - alb.listeners|length == 1 - - alb.listeners[0].rules[0].actions|length == 1 - - alb.listeners[0].rules[0].actions[0].type == "redirect" - - - name: update ALB with fixed-response DefaultAction - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: fixed-response - FixedResponseConfig: "{{ FixedResponseActionConfig }}" - register: alb - - - assert: - that: - - alb.changed - - alb.listeners|length == 1 - - alb.listeners[0].rules[0].actions|length == 1 - - alb.listeners[0].rules[0].actions[0].type == "fixed-response" - - - name: test idempotence with fixed-response DefaultAction - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: fixed-response - FixedResponseConfig: "{{ FixedResponseActionConfig }}" - register: alb - - - assert: - that: - - not alb.changed - - 
alb.listeners|length == 1 - - alb.listeners[0].rules[0].actions|length == 1 - - alb.listeners[0].rules[0].actions[0].type == "fixed-response" - - - name: test multiple non-default rules - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: fixed-response - FixedResponseConfig: "{{ FixedResponseActionConfig }}" - Rules: - - Conditions: - - Field: http-header - HttpHeaderConfig: - HttpHeaderName: 'User-Agent' - Values: ['*Trident/7:0*rv:*'] - - Field: http-header - HttpHeaderConfig: - HttpHeaderName: 'X-Something' - Values: ['foobar'] - Priority: '1' - Actions: - - Type: fixed-response - FixedResponseConfig: - StatusCode: "200" - ContentType: "text/html" - MessageBody: "Hello World!" - - Conditions: - - Field: path-pattern - Values: - - "/forward-path/*" - Priority: 2 - Actions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - - Conditions: - - Field: path-pattern - Values: - - "/redirect-path/*" - Priority: 3 - Actions: - - Type: redirect - RedirectConfig: "{{ RedirectActionConfig }}" - - Conditions: - - Field: path-pattern - Values: - - "/fixed-response-path/" - Priority: 4 - Actions: - - Type: fixed-response - FixedResponseConfig: "{{ FixedResponseActionConfig }}" - register: alb - - - assert: - that: - - alb.changed - - alb.listeners|length == 1 - - alb.listeners[0].rules|length == 5 ## defaultactions is included as a rule - - alb.listeners[0].rules[0].actions|length == 1 - - alb.listeners[0].rules[0].actions[0].type == "fixed-response" - - alb.listeners[0].rules[1].actions|length == 1 - - alb.listeners[0].rules[1].actions[0].type == "forward" - - alb.listeners[0].rules[2].actions|length == 1 - - alb.listeners[0].rules[2].actions[0].type == "redirect" - - alb.listeners[0].rules[3].actions|length == 1 - - alb.listeners[0].rules[3].actions[0].type == "fixed-response" - - - name: test idempotence multiple non-default rules - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: fixed-response - FixedResponseConfig: "{{ FixedResponseActionConfig }}" - Rules: - - Conditions: - - Field: http-header - HttpHeaderConfig: - HttpHeaderName: 'User-Agent' - Values: ['*Trident/7:0*rv:*'] - - Field: http-header - HttpHeaderConfig: - HttpHeaderName: 'X-Something' - Values: ['foobar'] - Priority: '1' - Actions: - - Type: fixed-response - FixedResponseConfig: - StatusCode: "200" - ContentType: "text/html" - MessageBody: "Hello World!" 
- - Conditions: - - Field: path-pattern - Values: - - "/forward-path/*" - Priority: 2 - Actions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - - Conditions: - - Field: path-pattern - Values: - - "/redirect-path/*" - Priority: 3 - Actions: - - Type: redirect - RedirectConfig: "{{ RedirectActionConfig }}" - - Conditions: - - Field: path-pattern - Values: - - "/fixed-response-path/" - Priority: 4 - Actions: - - Type: fixed-response - FixedResponseConfig: "{{ FixedResponseActionConfig }}" - register: alb - - - assert: - that: - - not alb.changed - - alb.listeners|length == 1 - - alb.listeners[0].rules|length == 5 ## defaultactions is included as a rule - - alb.listeners[0].rules[0].actions|length == 1 - - alb.listeners[0].rules[0].actions[0].type == "fixed-response" - - alb.listeners[0].rules[1].actions|length == 1 - - alb.listeners[0].rules[1].actions[0].type == "forward" - - alb.listeners[0].rules[2].actions|length == 1 - - alb.listeners[0].rules[2].actions[0].type == "redirect" - - alb.listeners[0].rules[3].actions|length == 1 - - alb.listeners[0].rules[3].actions[0].type == "fixed-response" - - -# - name: test creating ALB with a default listener with multiple actions -# elb_application_lb: -# name: "{{ alb_name }}" -# subnets: "{{ alb_subnets }}" -# security_groups: "{{ sec_group.group_id }}" -# state: present -# listeners: -# - Protocol: HTTP -# Port: 80 -# DefaultActions: -# - Type: forward -# TargetGroupName: "{{ tg_name }}" -# Order: 2 -# - Type: authenticate-oidc -# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}" -# Order: 1 -# register: alb -# -# - assert: -# that: -# - alb.listeners|length == 1 -# - alb.listeners[0].rules[0].actions|length == 2 -# -# - name: test changing order of actions -# elb_application_lb: -# name: "{{ alb_name }}" -# subnets: "{{ alb_subnets }}" -# security_groups: "{{ sec_group.group_id }}" -# state: present -# listeners: -# - Protocol: HTTP -# Port: 80 -# DefaultActions: -# - Type: authenticate-oidc -# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}" -# Order: 1 -# - Type: forward -# TargetGroupName: "{{ tg_name }}" -# Order: 2 -# register: alb -# -# - assert: -# that: -# - not alb.changed -# - alb.listeners|length == 1 -# - alb.listeners[0].rules[0].actions|length == 2 -# -# - name: test non-default rule with multiple actions -# elb_application_lb: -# name: "{{ alb_name }}" -# subnets: "{{ alb_subnets }}" -# security_groups: "{{ sec_group.group_id }}" -# state: present -# listeners: -# - Protocol: HTTP -# Port: 80 -# DefaultActions: -# - Type: authenticate-oidc -# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}" -# Order: 1 -# - Type: forward -# TargetGroupName: "{{ tg_name }}" -# Order: 2 -# Rules: -# - Conditions: -# - Field: path-pattern -# Values: -# - "*" -# Priority: 1 -# Actions: -# - Type: forward -# TargetGroupName: "{{ tg_name }}" -# Order: 2 -# - Type: authenticate-oidc -# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}" -# Order: 1 -# register: alb -# -# - assert: -# that: -# - alb.changed -# - alb.listeners|length == 1 -# - alb.listeners[0].rules[0].actions|length == 2 -# - alb.listeners[0].rules[1].actions|length == 2 -# -# - name: test idempotency non-default rule with multiple actions -# elb_application_lb: -# name: "{{ alb_name }}" -# subnets: "{{ alb_subnets }}" -# security_groups: "{{ sec_group.group_id }}" -# state: present -# listeners: -# - Protocol: HTTP -# Port: 80 -# DefaultActions: -# - Type: authenticate-oidc -# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}" 
-# Order: 1 -# - Type: forward -# TargetGroupName: "{{ tg_name }}" -# Order: 2 -# Rules: -# - Conditions: -# - Field: path-pattern -# Values: -# - "*" -# Priority: 1 -# Actions: -# - Type: forward -# TargetGroupName: "{{ tg_name }}" -# Order: 2 -# - Type: authenticate-oidc -# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}" -# Order: 1 -# register: alb -# -# - assert: -# that: -# - not alb.changed -# - alb.listeners|length == 1 -# - alb.listeners[0].rules[0].actions|length == 2 -# - alb.listeners[0].rules[1].actions|length == 2 -# -# - name: test non-default rule action order change -# elb_application_lb: -# name: "{{ alb_name }}" -# subnets: "{{ alb_subnets }}" -# security_groups: "{{ sec_group.group_id }}" -# state: present -# listeners: -# - Protocol: HTTP -# Port: 80 -# DefaultActions: -# - Type: authenticate-oidc -# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}" -# Order: 1 -# - Type: forward -# TargetGroupName: "{{ tg_name }}" -# Order: 2 -# Rules: -# - Conditions: -# - Field: path-pattern -# Values: -# - "*" -# Priority: 1 -# Actions: -# - Type: authenticate-oidc -# AuthenticateOidcConfig: "{{ AuthenticateOidcActionConfig }}" -# Order: 1 -# - Type: forward -# TargetGroupName: "{{ tg_name }}" -# Order: 2 -# register: alb -# -# - assert: -# that: -# - not alb.changed -# - alb.listeners|length == 1 -# - alb.listeners[0].rules[0].actions|length == 2 -# - alb.listeners[0].rules[1].actions|length == 2 diff --git a/tests/integration/targets/elb_application_lb_info/aliases b/tests/integration/targets/elb_application_lb_info/aliases deleted file mode 100644 index 4ef4b2067d0..00000000000 --- a/tests/integration/targets/elb_application_lb_info/aliases +++ /dev/null @@ -1 +0,0 @@ -cloud/aws diff --git a/tests/integration/targets/elb_application_lb_info/defaults/main.yml b/tests/integration/targets/elb_application_lb_info/defaults/main.yml deleted file mode 100644 index d0c601c6a04..00000000000 --- a/tests/integration/targets/elb_application_lb_info/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -resource_short: "{{ '%0.8x'%((16**8) | random(seed=resource_prefix)) }}" -alb_name: "alb-test-{{ resource_short }}" -tg_name: "alb-test-{{ resource_short }}" diff --git a/tests/integration/targets/elb_application_lb_info/meta/main.yml b/tests/integration/targets/elb_application_lb_info/meta/main.yml deleted file mode 100644 index 1810d4bec98..00000000000 --- a/tests/integration/targets/elb_application_lb_info/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -dependencies: - - setup_remote_tmp_dir diff --git a/tests/integration/targets/elb_application_lb_info/tasks/full_test.yml b/tests/integration/targets/elb_application_lb_info/tasks/full_test.yml deleted file mode 100644 index 7603a0454ab..00000000000 --- a/tests/integration/targets/elb_application_lb_info/tasks/full_test.yml +++ /dev/null @@ -1,11 +0,0 @@ -- name: elb_application_lb full_test - block: - # setup - - include_tasks: setup.yml - - # Run main tests - - include_tasks: test_elb_application_lb_info.yml - - always: - # Cleanup - - include_tasks: teardown.yml diff --git a/tests/integration/targets/elb_application_lb_info/tasks/main.yml b/tests/integration/targets/elb_application_lb_info/tasks/main.yml deleted file mode 100644 index 5d9eb4fe73f..00000000000 --- a/tests/integration/targets/elb_application_lb_info/tasks/main.yml +++ /dev/null @@ -1,11 +0,0 @@ -- name: 'elb_application_lb_info integration tests' - collections: - - amazon.aws - module_defaults: - group/aws: - aws_access_key: '{{ aws_access_key }}' - 
aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' - block: - - include_tasks: full_test.yml diff --git a/tests/integration/targets/elb_application_lb_info/tasks/setup.yml b/tests/integration/targets/elb_application_lb_info/tasks/setup.yml deleted file mode 100644 index 26289d230d0..00000000000 --- a/tests/integration/targets/elb_application_lb_info/tasks/setup.yml +++ /dev/null @@ -1,84 +0,0 @@ -- name: elb_application_lb_info setup - block: - - name: create VPC - ec2_vpc_net: - cidr_block: 10.228.228.0/22 - name: '{{ resource_prefix }}_vpc' - state: present - register: vpc - - - name: create internet gateway - ec2_vpc_igw: - vpc_id: '{{ vpc.vpc.id }}' - state: present - tags: - Name: '{{ resource_prefix }}' - register: igw - - - name: create public subnet - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - az: '{{ aws_region}}{{ item.az }}' - vpc_id: '{{ vpc.vpc.id }}' - state: present - tags: - Public: '{{ item.public|string }}' - Name: '{{ item.public|ternary(''public'', ''private'') }}-{{ item.az }}' - with_items: - - cidr: 10.228.228.0/24 - az: a - public: 'True' - - cidr: 10.228.229.0/24 - az: b - public: 'True' - - cidr: 10.228.230.0/24 - az: a - public: 'False' - - cidr: 10.228.231.0/24 - az: b - public: 'False' - register: subnets - - - ec2_vpc_subnet_info: - filters: - vpc-id: '{{ vpc.vpc.id }}' - register: vpc_subnets - - - name: create list of subnet ids - set_fact: - alb_subnets: "{{ ( vpc_subnets.subnets | selectattr('tags.Public', 'equalto', 'True') | map(attribute='id') | list ) }}" - private_subnets: "{{ ( vpc_subnets.subnets | rejectattr('tags.Public', 'equalto', 'True') | map(attribute='id') | list ) }}" - - - name: create a route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Name: igw-route - Created: '{{ resource_prefix }}' - subnets: '{{ alb_subnets + private_subnets }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: '{{ igw.gateway_id }}' - register: route_table - - - ec2_group: - name: '{{ resource_prefix }}' - description: security group for Ansible ALB integration tests - state: present - vpc_id: '{{ vpc.vpc.id }}' - rules: - - proto: tcp - from_port: 1 - to_port: 65535 - cidr_ip: 0.0.0.0/0 - register: sec_group - - - name: create a target group for testing - elb_target_group: - name: '{{ tg_name }}' - protocol: http - port: 80 - vpc_id: '{{ vpc.vpc.id }}' - state: present - register: tg - diff --git a/tests/integration/targets/elb_application_lb_info/tasks/teardown.yml b/tests/integration/targets/elb_application_lb_info/tasks/teardown.yml deleted file mode 100644 index 24326e343a6..00000000000 --- a/tests/integration/targets/elb_application_lb_info/tasks/teardown.yml +++ /dev/null @@ -1,83 +0,0 @@ -- name: elb_application_lb_info teardown - block: - - name: destroy ALB - elb_application_lb: - name: '{{ alb_name }}' - state: absent - wait: true - wait_timeout: 600 - ignore_errors: true - - - name: destroy target group if it was created - elb_target_group: - name: '{{ tg_name }}' - protocol: http - port: 80 - vpc_id: '{{ vpc.vpc.id }}' - state: absent - wait: true - wait_timeout: 600 - register: remove_tg - retries: 5 - delay: 3 - until: remove_tg is success - when: tg is defined - ignore_errors: true - - name: destroy sec group - ec2_group: - name: '{{ sec_group.group_name }}' - description: security group for Ansible ALB integration tests - state: absent - vpc_id: '{{ vpc.vpc.id }}' - register: remove_sg - retries: 10 - delay: 5 - until: remove_sg is success - ignore_errors: 
true - - name: remove route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - route_table_id: '{{ route_table.route_table.route_table_id }}' - lookup: id - state: absent - register: remove_rt - retries: 10 - delay: 5 - until: remove_rt is success - ignore_errors: true - - name: destroy subnets - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - vpc_id: '{{ vpc.vpc.id }}' - state: absent - register: remove_subnet - retries: 10 - delay: 5 - until: remove_subnet is success - with_items: - - cidr: 10.228.228.0/24 - - cidr: 10.228.229.0/24 - - cidr: 10.228.230.0/24 - - cidr: 10.228.231.0/24 - ignore_errors: true - - name: destroy internet gateway - ec2_vpc_igw: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Name: '{{ resource_prefix }}' - state: absent - register: remove_igw - retries: 10 - delay: 5 - until: remove_igw is success - ignore_errors: true - - name: destroy VPC - ec2_vpc_net: - cidr_block: 10.228.228.0/22 - name: '{{ resource_prefix }}_vpc' - state: absent - register: remove_vpc - retries: 10 - delay: 5 - until: remove_vpc is success - ignore_errors: true diff --git a/tests/integration/targets/elb_application_lb_info/tasks/test_elb_application_lb_info.yml b/tests/integration/targets/elb_application_lb_info/tasks/test_elb_application_lb_info.yml deleted file mode 100644 index 229ac43001b..00000000000 --- a/tests/integration/targets/elb_application_lb_info/tasks/test_elb_application_lb_info.yml +++ /dev/null @@ -1,41 +0,0 @@ -- block: - - - name: create ALB with a listener - elb_application_lb: - name: "{{ alb_name }}" - subnets: "{{ alb_subnets }}" - security_groups: "{{ sec_group.group_id }}" - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ tg_name }}" - register: alb - - - assert: - that: - - alb.changed - - alb.listeners|length == 1 - - alb.listeners[0].rules|length == 1 - - - name: ELB applicaiton info using load balancer arn - elb_application_lb_info: - load_balancer_arns: - - "{{ alb.load_balancer_arn }}" - register: elb_app_lb_info - - - assert: - that: - - elb_app_lb_info.load_balancers[0].ip_address_type == 'ipv4' - - - name: ELB applicaiton info using load balancer name - elb_application_lb_info: - names: - - "{{ alb.load_balancer_name }}" - register: elb_app_lb_info - - - assert: - that: - - elb_app_lb_info.load_balancers[0].ip_address_type == 'ipv4' From c91acf6a14c0aa69974241ead13223a84dbd5334 Mon Sep 17 00:00:00 2001 From: Stefan Horning Date: Thu, 10 Feb 2022 11:36:13 +0100 Subject: [PATCH 23/31] Extended the wafv2_web_acl module with custom_response_bodies argument (#721) Extended the wafv2_web_acl module with custom_response_bodies argument SUMMARY Extended the wafv2_web_acl module to also take the custom_response_bodies argument, improved docs and extended tests ISSUE TYPE Feature Pull Request COMPONENT NAME wafv2_web_acl ADDITIONAL INFORMATION Also touched docs of aws_waf_web_acl to make it easier to find the WAF v2 modules as I had trouble finding that at first. 
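For illustration, a minimal sketch of the new argument in use, following the examples added in this patch (the ACL name and response body text are placeholders):

- name: Block rate-limited clients with a custom JSON body
  community.aws.wafv2_web_acl:
    name: example-acl            # placeholder name
    state: present
    scope: REGIONAL
    default_action: Allow
    sampled_requests: no
    cloudwatch_metrics: no
    rules:
      - name: rate-limit-per-IP
        priority: 1
        action:
          block:
            custom_response:
              response_code: 429
              # references a key defined under custom_response_bodies below
              custom_response_body_key: too_many_requests
        statement:
          rate_based_statement:
            limit: 5000
            aggregate_key_type: IP
        visibility_config:
          sampled_requests_enabled: yes
          cloud_watch_metrics_enabled: no
          metric_name: rate-limit-per-IP
    custom_response_bodies:
      too_many_requests:
        content_type: APPLICATION_JSON
        content: '{ "message": "Request blocked due to too many requests from your IP" }'

As noted in the module documentation changes, custom_response_bodies requires botocore >= 1.21.0.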
Reviewed-by: Markus Bergholz Reviewed-by: Stefan Horning Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- changelogs/fragments/721-wafv2_web_acl.yml | 3 + plugins/modules/aws_waf_web_acl.py | 6 +- plugins/modules/wafv2_web_acl.py | 144 ++++-- tests/integration/targets/wafv2/meta/main.yml | 6 + tests/integration/targets/wafv2/tasks/alb.yml | 1 - .../targets/wafv2/tasks/create_webacl.yml | 410 ++++++++++++------ .../integration/targets/wafv2/tasks/main.yml | 9 +- .../targets/wafv2/tasks/test_webacl.yml | 3 +- 8 files changed, 405 insertions(+), 177 deletions(-) create mode 100644 changelogs/fragments/721-wafv2_web_acl.yml create mode 100644 tests/integration/targets/wafv2/meta/main.yml diff --git a/changelogs/fragments/721-wafv2_web_acl.yml b/changelogs/fragments/721-wafv2_web_acl.yml new file mode 100644 index 00000000000..a5bcf2f7330 --- /dev/null +++ b/changelogs/fragments/721-wafv2_web_acl.yml @@ -0,0 +1,3 @@ +minor_changes: +- wafv2_web_acl - Extended the wafv2_web_acl module to also take the ``custom_response_bodies`` argument (https://github.com/ansible-collections/community.aws/pull/721). +- wafv2_web_acl - Documentation updates wafv2_web_acl and aws_waf_web_acl (https://github.com/ansible-collections/community.aws/pull/721). diff --git a/plugins/modules/aws_waf_web_acl.py b/plugins/modules/aws_waf_web_acl.py index 7cdf770aa38..609df528a0a 100644 --- a/plugins/modules/aws_waf_web_acl.py +++ b/plugins/modules/aws_waf_web_acl.py @@ -8,11 +8,11 @@ DOCUMENTATION = r''' module: aws_waf_web_acl -short_description: Create and delete WAF Web ACLs. +short_description: Create and delete WAF Web ACLs version_added: 1.0.0 description: - - Read the AWS documentation for WAF - U(https://aws.amazon.com/documentation/waf/). + - Module for WAF classic, for WAF v2 use the I(wafv2_*) modules. + - Read the AWS documentation for WAF U(https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html). author: - Mike Mochan (@mmochan) diff --git a/plugins/modules/wafv2_web_acl.py b/plugins/modules/wafv2_web_acl.py index 5306c2e047f..b11b0872b0e 100644 --- a/plugins/modules/wafv2_web_acl.py +++ b/plugins/modules/wafv2_web_acl.py @@ -11,9 +11,10 @@ version_added: 1.5.0 author: - "Markus Bergholz (@markuman)" -short_description: wafv2_web_acl +short_description: Create and delete WAF Web ACLs description: - - Create, modify or delete a wafv2 web acl. + - Create, modify or delete AWS WAF v2 web ACLs (not for classic WAF). + - See docs at U(https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html) options: state: description: @@ -28,9 +29,9 @@ type: str scope: description: - - Scope of wafv2 web acl. + - Geographical scope of the web acl. required: true - choices: ["CLOUDFRONT","REGIONAL"] + choices: ["CLOUDFRONT", "REGIONAL"] type: str description: description: @@ -39,7 +40,7 @@ default_action: description: - Default action of the wafv2 web acl. - choices: ["Block","Allow"] + choices: ["Block", "Allow"] type: str sampled_requests: description: @@ -87,6 +88,14 @@ description: - Rule configuration. type: dict + custom_response_bodies: + description: + - A map of custom response keys and content bodies. Define response bodies here and reference them in the rules by providing + - the key of the body dictionary element. + - Each element must have a unique dict key and in the dict two keys for I(content_type) and I(content). 
+ - Requires botocore >= 1.21.0 + type: dict + version_added: 3.1.0 purge_rules: description: - When set to C(no), keep the existing load balancer rules in place. Will modify and add, but will not delete. @@ -100,16 +109,15 @@ ''' EXAMPLES = ''' -- name: create web acl +- name: Create test web acl community.aws.wafv2_web_acl: name: test05 - state: present description: hallo eins scope: REGIONAL default_action: Allow sampled_requests: no cloudwatch_metrics: yes - metric_name: blub + metric_name: test05-acl-metric rules: - name: zwei priority: 0 @@ -191,10 +199,56 @@ text_transformations: - type: LOWERCASE priority: 0 + purge_rules: yes tags: A: B C: D - register: out + state: present + +- name: Create IP filtering web ACL + community.aws.wafv2_web_acl: + name: ip-filtering-traffic + description: ACL that filters web traffic based on rate limits and whitelists some IPs + scope: REGIONAL + default_action: Allow + sampled_requests: yes + cloudwatch_metrics: yes + metric_name: ip-filtering-traffic + rules: + - name: whitelist-own-IPs + priority: 0 + action: + allow: {} + statement: + ip_set_reference_statement: + arn: 'arn:aws:wafv2:us-east-1:520789123123:regional/ipset/own-public-ips/1c4bdfc4-0f77-3b23-5222-123123123' + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: waf-acl-rule-whitelist-own-IPs + - name: rate-limit-per-IP + priority: 1 + action: + block: + custom_response: + response_code: 429 + custom_response_body_key: too_many_requests + statement: + rate_based_statement: + limit: 5000 + aggregate_key_type: IP + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: waf-acl-rule-rate-limit-per-IP + purge_rules: yes + custom_response_bodies: + too_many_requests: + content_type: APPLICATION_JSON + content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' + region: us-east-1 + state: present + ''' RETURN = """ @@ -218,6 +272,12 @@ sample: test02 returned: Always, as long as the web acl exists type: str +default_action: + description: Default action of ACL + returned: Always, as long as the web acl exists + sample: + allow: {} + type: dict rules: description: Current rules of the web acl returned: Always, as long as the web acl exists @@ -235,6 +295,14 @@ cloud_watch_metrics_enabled: true metric_name: admin_protect sampled_requests_enabled: true +custom_response_bodies: + description: Custom response body configurations to be used in rules + type: dict + sample: + too_many_requests: + content_type: APPLICATION_JSON + content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' + returned: Always, as long as the web acl exists visibility_config: description: Visibility config of the web acl returned: Always, as long as the web acl exists @@ -267,22 +335,27 @@ def __init__(self, wafv2, name, scope, fail_json_aws): self.fail_json_aws = fail_json_aws self.existing_acl, self.id, self.locktoken = self.get_web_acl() - def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name): + def update(self, default_action, description, rules, sampled_requests, cloudwatch_metrics, metric_name, custom_response_bodies): + req_obj = { + 'Name': self.name, + 'Scope': self.scope, + 'Id': self.id, + 'DefaultAction': default_action, + 'Description': description, + 'Rules': rules, + 'VisibilityConfig': { + 'SampledRequestsEnabled': sampled_requests, + 'CloudWatchMetricsEnabled': 
cloudwatch_metrics, + 'MetricName': metric_name + }, + 'LockToken': self.locktoken + } + + if custom_response_bodies: + req_obj['CustomResponseBodies'] = custom_response_bodies + try: - response = self.wafv2.update_web_acl( - Name=self.name, - Scope=self.scope, - Id=self.id, - DefaultAction=default_action, - Description=description, - Rules=rules, - VisibilityConfig={ - 'SampledRequestsEnabled': sampled_requests, - 'CloudWatchMetricsEnabled': cloudwatch_metrics, - 'MetricName': metric_name - }, - LockToken=self.locktoken - ) + response = self.wafv2.update_web_acl(**req_obj) except (BotoCoreError, ClientError) as e: self.fail_json_aws(e, msg="Failed to update wafv2 web acl.") return response @@ -331,7 +404,7 @@ def get_web_acl(self): def list(self): return wafv2_list_web_acls(self.wafv2, self.scope, self.fail_json_aws) - def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description): + def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, metric_name, tags, description, custom_response_bodies): req_obj = { 'Name': self.name, 'Scope': self.scope, @@ -343,6 +416,9 @@ def create(self, default_action, rules, sampled_requests, cloudwatch_metrics, me 'MetricName': metric_name } } + + if custom_response_bodies: + req_obj['CustomResponseBodies'] = custom_response_bodies if description: req_obj['Description'] = description if tags: @@ -370,6 +446,7 @@ def main(): cloudwatch_metrics=dict(type='bool', default=True), metric_name=dict(type='str'), tags=dict(type='dict'), + custom_response_bodies=dict(type='dict'), purge_rules=dict(default=True, type='bool') ) @@ -392,6 +469,14 @@ def main(): purge_rules = module.params.get("purge_rules") check_mode = module.check_mode + custom_response_bodies = module.params.get("custom_response_bodies") + if custom_response_bodies: + module.require_botocore_at_least('1.21.0', reason='to set custom response bodies') + custom_response_bodies = {} + + for custom_name, body in module.params.get("custom_response_bodies").items(): + custom_response_bodies[custom_name] = snake_dict_to_camel_dict(body, capitalize_first=True) + if default_action == 'Block': default_action = {'Block': {}} elif default_action == 'Allow': @@ -422,7 +507,8 @@ def main(): rules, sampled_requests, cloudwatch_metrics, - metric_name + metric_name, + custom_response_bodies ) else: @@ -438,7 +524,8 @@ def main(): cloudwatch_metrics, metric_name, tags, - description + description, + custom_response_bodies ) elif state == 'absent': @@ -453,7 +540,8 @@ def main(): rules, sampled_requests, cloudwatch_metrics, - metric_name + metric_name, + custom_response_bodies ) else: change = True diff --git a/tests/integration/targets/wafv2/meta/main.yml b/tests/integration/targets/wafv2/meta/main.yml new file mode 100644 index 00000000000..4afae0b9340 --- /dev/null +++ b/tests/integration/targets/wafv2/meta/main.yml @@ -0,0 +1,6 @@ +dependencies: + - setup_remote_tmp_dir + - role: setup_botocore_pip + vars: + boto3_version: "1.18.0" + botocore_version: "1.21.0" diff --git a/tests/integration/targets/wafv2/tasks/alb.yml b/tests/integration/targets/wafv2/tasks/alb.yml index 6ecb0abb01d..32aeb376a3b 100644 --- a/tests/integration/targets/wafv2/tasks/alb.yml +++ b/tests/integration/targets/wafv2/tasks/alb.yml @@ -101,6 +101,5 @@ - assert: that: - - alb.changed - alb.listeners|length == 1 - alb.listeners[0].rules|length == 1 diff --git a/tests/integration/targets/wafv2/tasks/create_webacl.yml b/tests/integration/targets/wafv2/tasks/create_webacl.yml index 
8b195ab570c..978ab282a60 100644 --- a/tests/integration/targets/wafv2/tasks/create_webacl.yml +++ b/tests/integration/targets/wafv2/tasks/create_webacl.yml @@ -1,151 +1,275 @@ ####################### ## Create web acl ####################### -- name: check_mode create web acl - wafv2_web_acl: - name: "{{ web_acl_name }}" - state: present - description: hallo eins - scope: REGIONAL - default_action: Allow - sampled_requests: no - cloudwatch_metrics: yes - metric_name: blub - rules: - - name: zwei - priority: 2 - action: - block: {} - visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes - metric_name: ddos - statement: - xss_match_statement: - field_to_match: - body: {} - text_transformations: - - type: NONE - priority: 0 - - name: admin_protect - priority: 1 - override_action: - none: {} - visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes - metric_name: fsd - statement: - managed_rule_group_statement: - vendor_name: AWS - name: AWSManagedRulesAdminProtectionRuleSet - tags: - A: B - C: D - register: out - check_mode: yes -- name: check_mode verify create - assert: - that: - - out is changed +- name: Wrap test in virtualenv created above (use other python interpreter) + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + block: -- name: create web acl - wafv2_web_acl: - name: "{{ web_acl_name }}" - state: present - description: hallo eins - scope: REGIONAL - default_action: Allow - sampled_requests: no - cloudwatch_metrics: yes - metric_name: blub - rules: - - name: zwei - priority: 2 - action: - block: {} - visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes - metric_name: ddos - statement: - xss_match_statement: - field_to_match: - body: {} - text_transformations: - - type: NONE - priority: 0 - - name: admin_protect - priority: 1 - override_action: - none: {} - visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes - metric_name: fsd - statement: - managed_rule_group_statement: - vendor_name: AWS - name: AWSManagedRulesAdminProtectionRuleSet - tags: - A: B - C: D - register: ACL + - name: check_mode create web acl + wafv2_web_acl: + name: "{{ web_acl_name }}" + state: present + description: hallo eins + scope: REGIONAL + default_action: Allow + sampled_requests: no + cloudwatch_metrics: yes + metric_name: blub + rules: + - name: zwei + priority: 2 + action: + block: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: ddos + statement: + xss_match_statement: + field_to_match: + body: {} + text_transformations: + - type: NONE + priority: 0 + - name: admin_protect + priority: 1 + override_action: + none: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: fsd + statement: + managed_rule_group_statement: + vendor_name: AWS + name: AWSManagedRulesAdminProtectionRuleSet + custom_response_bodies: + too_many_requests: + content_type: APPLICATION_JSON + content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' + tags: + A: B + C: D + register: out + check_mode: yes -- name: verify create - assert: - that: - - ACL is changed - - ACL.web_acl.name == web_acl_name - - not ACL.web_acl.visibility_config.sampled_requests_enabled - - ACL.web_acl.rules | count == 2 - - ACL.web_acl.description == 'hallo eins' + - name: check_mode verify create + assert: + that: + - out is changed -- name: immutable 
create web acl - wafv2_web_acl: - name: "{{ web_acl_name }}" - state: present - description: hallo eins - scope: REGIONAL - default_action: Allow - sampled_requests: no - cloudwatch_metrics: yes - metric_name: blub - rules: - - name: zwei - priority: 2 - action: - block: {} - visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes - metric_name: ddos - statement: - xss_match_statement: - field_to_match: - body: {} - text_transformations: - - type: NONE - priority: 0 - - name: admin_protect - priority: 1 - override_action: - none: {} - visibility_config: - sampled_requests_enabled: yes - cloud_watch_metrics_enabled: yes - metric_name: fsd - statement: - managed_rule_group_statement: - vendor_name: AWS - name: AWSManagedRulesAdminProtectionRuleSet - tags: - A: B - C: D - register: out + - name: Create web acl with custom response bodies + wafv2_web_acl: + name: "{{ resource_prefix }}-acl-with-response-body" + state: present + description: foo + scope: REGIONAL + default_action: Allow + sampled_requests: no + cloudwatch_metrics: no + rules: + - name: rate-limit-per-IP + priority: 1 + action: + block: + custom_response: + response_code: 429 + custom_response_body_key: too_many_requests + statement: + rate_based_statement: + limit: 1000 + aggregate_key_type: IP + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: no + metric_name: unused + custom_response_bodies: + too_many_requests: + content_type: APPLICATION_JSON + content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' + register: acl_with_response_body -- name: verify create - assert: - that: - - out is not changed \ No newline at end of file + - name: Web acl with custom response bodies verify create + assert: + that: + - acl_with_response_body is changed + - acl_with_response_body.web_acl.rules | count == 1 + - acl_with_response_body.web_acl.custom_response_bodies.too_many_requests is defined + + - name: Update web acl with custom response bodies to remove custom response + wafv2_web_acl: + name: "{{ resource_prefix }}-acl-with-response-body" + state: present + scope: REGIONAL + description: foo + default_action: Allow + sampled_requests: no + cloudwatch_metrics: no + rules: + - name: rate-limit-per-IP + priority: 1 + action: + block: {} + statement: + rate_based_statement: + limit: 1000 + aggregate_key_type: IP + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: no + metric_name: unused + custom_response_bodies: {} + + # unfortunately the wafv2_web_acl does not return the ACL structure after an update + # hence we have to do another task here using the info module to retrieve the latest state + # of the ACL and then to check it + - name: check if custom response body was really removed + wafv2_web_acl_info: + name: "{{ resource_prefix }}-acl-with-response-body" + scope: REGIONAL + register: acl_without_response_bodies + + - name: Web acl with custom response bodies verify removal of custom response + assert: + that: + - acl_without_response_bodies.custom_response_bodies is undefined + + - name: create web acl + wafv2_web_acl: + name: "{{ web_acl_name }}" + state: present + description: hallo eins + scope: REGIONAL + default_action: Allow + sampled_requests: no + cloudwatch_metrics: yes + metric_name: blub + rules: + - name: zwei + priority: 2 + action: + block: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: ddos + statement: + 
xss_match_statement: + field_to_match: + body: {} + text_transformations: + - type: NONE + priority: 0 + - name: admin_protect + priority: 1 + override_action: + none: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: fsd + statement: + managed_rule_group_statement: + vendor_name: AWS + name: AWSManagedRulesAdminProtectionRuleSet + - name: rate-limit-per-IP + priority: 3 + action: + block: + custom_response: + response_code: 429 + custom_response_body_key: too_many_requests + statement: + rate_based_statement: + limit: 5000 + aggregate_key_type: IP + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: waf-acl-rule-rate-limit-per-IP + custom_response_bodies: + too_many_requests: + content_type: APPLICATION_JSON + content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' + tags: + A: B + C: D + register: ACL + + - name: verify create + assert: + that: + - ACL is changed + - ACL.web_acl.name == web_acl_name + - not ACL.web_acl.visibility_config.sampled_requests_enabled + - ACL.web_acl.rules | count == 3 + - ACL.web_acl.description == 'hallo eins' + + - name: immutable create web acl + wafv2_web_acl: + name: "{{ web_acl_name }}" + state: present + description: hallo eins + scope: REGIONAL + default_action: Allow + sampled_requests: no + cloudwatch_metrics: yes + metric_name: blub + rules: + - name: zwei + priority: 2 + action: + block: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: ddos + statement: + xss_match_statement: + field_to_match: + body: {} + text_transformations: + - type: NONE + priority: 0 + - name: admin_protect + priority: 1 + override_action: + none: {} + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: fsd + statement: + managed_rule_group_statement: + vendor_name: AWS + name: AWSManagedRulesAdminProtectionRuleSet + - name: rate-limit-per-IP + priority: 3 + action: + block: + custom_response: + response_code: 429 + custom_response_body_key: too_many_requests + statement: + rate_based_statement: + limit: 5000 + aggregate_key_type: IP + visibility_config: + sampled_requests_enabled: yes + cloud_watch_metrics_enabled: yes + metric_name: waf-acl-rule-rate-limit-per-IP + custom_response_bodies: + too_many_requests: + content_type: APPLICATION_JSON + content: '{ message: "Your request has been blocked due to too many HTTP requests coming from your IP" }' + tags: + A: B + C: D + register: out + + - name: verify create + assert: + that: + - out is not changed diff --git a/tests/integration/targets/wafv2/tasks/main.yml b/tests/integration/targets/wafv2/tasks/main.yml index fa6e7fb3d86..547c4c15105 100644 --- a/tests/integration/targets/wafv2/tasks/main.yml +++ b/tests/integration/targets/wafv2/tasks/main.yml @@ -64,7 +64,7 @@ assert: that: - out is not changed - + always: ################################### # always delete wafv2 components @@ -91,6 +91,13 @@ scope: REGIONAL ignore_errors: true + - name: Ensure ACL with response body is removed + wafv2_web_acl: + name: "{{ resource_prefix }}-acl-with-response-body" + state: absent + scope: REGIONAL + ignore_errors: true + ######################### # remove alb and its deps ######################### diff --git a/tests/integration/targets/wafv2/tasks/test_webacl.yml b/tests/integration/targets/wafv2/tasks/test_webacl.yml index 2749450ab26..2d09eb3ebc9 100644 --- 
a/tests/integration/targets/wafv2/tasks/test_webacl.yml +++ b/tests/integration/targets/wafv2/tasks/test_webacl.yml @@ -10,7 +10,7 @@ - name: verify rules assert: that: - - out.rules | count == 2 + - out.rules | count == 3 - name: change web acl description wafv2_web_acl: @@ -116,6 +116,7 @@ sampled_requests: no cloudwatch_metrics: yes metric_name: blub + purge_rules: yes rules: - name: admin_protect priority: 1 From 44daa2ded8dc9f1dab0f7a4643176fe668a2a89c Mon Sep 17 00:00:00 2001 From: Mark Woolley Date: Thu, 10 Feb 2022 12:26:27 +0000 Subject: [PATCH 24/31] Refactor iam_managed_policy module and add integration tests (#893) Refactor iam_managed_policy module and add integration tests SUMMARY Refactor iam_managed_policy module to: Improve AWS retry backoff logic Add check_mode support Fix module exit on updates to policies when no changes are present Other changes: Add disabled integration tests ISSUE TYPE Bugfix Pull Request COMPONENT NAME iam_managed_policy ADDITIONAL INFORMATION Backoff logic only partially covered the module, and it didn't support check_mode or have any integration tests. Due to the nature of the IAM based modules the tests are intentionally disabled but have been run locally: ansible-test integration iam_managed_policy --allow-unsupported --docker PLAY RECAP ********************************************************************* testhost : ok=20 changed=6 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 AWS ACTIONS: ['iam:CreatePolicy', 'iam:CreatePolicyVersion', 'iam:DeletePolicy', 'iam:DeletePolicyVersion', 'iam:GetPolicy', 'iam:GetPolicyVersion', 'iam:ListEntitiesForPolicy', 'iam:ListPolicies', 'iam:ListPolicyVersions', 'iam:SetDefaultPolicyVersion'] Reviewed-by: Alina Buzachis Reviewed-by: Markus Bergholz --- .../893-refactor-iam_managed_policy.yml | 2 + plugins/modules/iam_managed_policy.py | 201 ++++++++++-------- .../targets/iam_managed_policy/aliases | 6 + .../iam_managed_policy/defaults/main.yml | 2 + .../targets/iam_managed_policy/tasks/main.yml | 160 ++++++++++++++ 5 files changed, 284 insertions(+), 87 deletions(-) create mode 100644 changelogs/fragments/893-refactor-iam_managed_policy.yml create mode 100644 tests/integration/targets/iam_managed_policy/aliases create mode 100644 tests/integration/targets/iam_managed_policy/defaults/main.yml create mode 100644 tests/integration/targets/iam_managed_policy/tasks/main.yml diff --git a/changelogs/fragments/893-refactor-iam_managed_policy.yml b/changelogs/fragments/893-refactor-iam_managed_policy.yml new file mode 100644 index 00000000000..22db07fb152 --- /dev/null +++ b/changelogs/fragments/893-refactor-iam_managed_policy.yml @@ -0,0 +1,2 @@ +minor_changes: + - iam_managed_policy - refactor module adding ``check_mode`` and better AWSRetry backoff logic (https://github.com/ansible-collections/community.aws/pull/893). 
diff --git a/plugins/modules/iam_managed_policy.py b/plugins/modules/iam_managed_policy.py index 2b33d711e71..403b4720d50 100644 --- a/plugins/modules/iam_managed_policy.py +++ b/plugins/modules/iam_managed_policy.py @@ -6,7 +6,7 @@ __metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r''' --- module: iam_managed_policy version_added: 1.0.0 @@ -55,7 +55,7 @@ - amazon.aws.ec2 ''' -EXAMPLES = ''' +EXAMPLES = r''' # Create Policy ex nihilo - name: Create IAM Managed Policy community.aws.iam_managed_policy: @@ -107,11 +107,12 @@ state: absent ''' -RETURN = ''' +RETURN = r''' policy: description: Returns the policy json structure, when state == absent this will return the value of the removed policy. returned: success - type: str + type: complex + contains: {} sample: '{ "arn": "arn:aws:iam::aws:policy/AdministratorAccess " "attachment_count": 0, @@ -142,14 +143,14 @@ @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) -def list_policies_with_backoff(iam): - paginator = iam.get_paginator('list_policies') +def list_policies_with_backoff(): + paginator = client.get_paginator('list_policies') return paginator.paginate(Scope='Local').build_full_result() -def get_policy_by_name(module, iam, name): +def get_policy_by_name(name): try: - response = list_policies_with_backoff(iam) + response = list_policies_with_backoff() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policies") for policy in response['Policies']: @@ -158,32 +159,36 @@ def get_policy_by_name(module, iam, name): return None -def delete_oldest_non_default_version(module, iam, policy): +def delete_oldest_non_default_version(policy): try: - versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] + versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] if not v['IsDefaultVersion']] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") versions.sort(key=lambda v: v['CreateDate'], reverse=True) for v in versions[-1:]: try: - iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete policy version") # This needs to return policy_version, changed -def get_or_create_policy_version(module, iam, policy, policy_document): +def get_or_create_policy_version(policy, policy_document): try: - versions = iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] + versions = client.list_policy_versions(PolicyArn=policy['Arn'])['Versions'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") + for v in versions: try: - document = iam.get_policy_version(PolicyArn=policy['Arn'], - VersionId=v['VersionId'])['PolicyVersion']['Document'] + document = client.get_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])['PolicyVersion']['Document'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v['VersionId'])) + + if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))): + return v, True + # If the current policy matches the existing one if not 
compare_policies(document, json.loads(to_native(policy_document))): return v, False @@ -195,12 +200,12 @@ def get_or_create_policy_version(module, iam, policy, policy_document): # and if that doesn't work, delete the oldest non default policy version # and try again. try: - version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] return version, True except is_boto3_error_code('LimitExceeded'): - delete_oldest_non_default_version(module, iam, policy) + delete_oldest_non_default_version(policy) try: - version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] + version = client.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion'] return version, True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as second_e: module.fail_json_aws(second_e, msg="Couldn't create policy version") @@ -208,58 +213,132 @@ def get_or_create_policy_version(module, iam, policy, policy_document): module.fail_json_aws(e, msg="Couldn't create policy version") -def set_if_default(module, iam, policy, policy_version, is_default): +def set_if_default(policy, policy_version, is_default): if is_default and not policy_version['IsDefaultVersion']: try: - iam.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId']) + client.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't set default policy version") return True return False -def set_if_only(module, iam, policy, policy_version, is_only): +def set_if_only(policy, policy_version, is_only): if is_only: try: - versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])[ + versions = [v for v in client.list_policy_versions(PolicyArn=policy['Arn'])[ 'Versions'] if not v['IsDefaultVersion']] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't list policy versions") for v in versions: try: - iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) + client.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete policy version") return len(versions) > 0 return False -def detach_all_entities(module, iam, policy, **kwargs): +def detach_all_entities(policy, **kwargs): try: - entities = iam.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs) + entities = client.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't detach list entities for policy {0}".format(policy['PolicyName'])) for g in entities['PolicyGroups']: try: - iam.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName']) + client.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g['GroupName'])) for u in entities['PolicyUsers']: try: - iam.detach_user_policy(PolicyArn=policy['Arn'], 
UserName=u['UserName']) + client.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u['UserName'])) for r in entities['PolicyRoles']: try: - iam.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName']) + client.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r['RoleName'])) if entities['IsTruncated']: - detach_all_entities(module, iam, policy, marker=entities['Marker']) + detach_all_entities(policy, marker=entities['Marker']) + + +def create_or_update_policy(existing_policy): + name = module.params.get('policy_name') + description = module.params.get('policy_description') + default = module.params.get('make_default') + only = module.params.get('only_version') + + policy = None + + if module.params.get('policy') is not None: + policy = json.dumps(json.loads(module.params.get('policy'))) + + if existing_policy is None: + if module.check_mode: + module.exit_json(changed=True) + + # Create policy when none already exists + try: + rvalue = client.create_policy(PolicyName=name, Path='/', PolicyDocument=policy, Description=description) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) + + module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy'])) + else: + policy_version, changed = get_or_create_policy_version(existing_policy, policy) + changed = set_if_default(existing_policy, policy_version, default) or changed + changed = set_if_only(existing_policy, policy_version, only) or changed + + # If anything has changed we need to refresh the policy + if changed: + try: + updated_policy = client.get_policy(PolicyArn=existing_policy['Arn'])['Policy'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json(msg="Couldn't get policy") + + module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(updated_policy)) + else: + module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(existing_policy)) + + +def delete_policy(existing_policy): + # Check for existing policy + if existing_policy: + if module.check_mode: + module.exit_json(changed=True) + + # Detach policy + detach_all_entities(existing_policy) + # Delete Versions + try: + versions = client.list_policy_versions(PolicyArn=existing_policy['Arn'])['Versions'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't list policy versions") + for v in versions: + if not v['IsDefaultVersion']: + try: + client.delete_policy_version(PolicyArn=existing_policy['Arn'], VersionId=v['VersionId']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws( + e, msg="Couldn't delete policy version {0}".format(v['VersionId'])) + # Delete policy + try: + client.delete_policy(PolicyArn=existing_policy['Arn']) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy['PolicyName'])) + + # This is the one case where we will return the old policy + module.exit_json(changed=True, 
policy=camel_dict_to_snake_dict(existing_policy)) + else: + module.exit_json(changed=False, policy=None) def main(): + global module + global client + argument_spec = dict( policy_name=dict(required=True), policy_description=dict(default=''), @@ -273,75 +352,23 @@ def main(): module = AnsibleAWSModule( argument_spec=argument_spec, required_if=[['state', 'present', ['policy']]], + supports_check_mode=True ) name = module.params.get('policy_name') - description = module.params.get('policy_description') state = module.params.get('state') - default = module.params.get('make_default') - only = module.params.get('only_version') - - policy = None - - if module.params.get('policy') is not None: - policy = json.dumps(json.loads(module.params.get('policy'))) try: - iam = module.client('iam') + client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Failed to connect to AWS') - p = get_policy_by_name(module, iam, name) - if state == 'present': - if p is None: - # No Policy so just create one - try: - rvalue = iam.create_policy(PolicyName=name, Path='/', - PolicyDocument=policy, Description=description) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) - - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy'])) - else: - policy_version, changed = get_or_create_policy_version(module, iam, p, policy) - changed = set_if_default(module, iam, p, policy_version, default) or changed - changed = set_if_only(module, iam, p, policy_version, only) or changed - # If anything has changed we needto refresh the policy - if changed: - try: - p = iam.get_policy(PolicyArn=p['Arn'])['Policy'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Couldn't get policy") + existing_policy = get_policy_by_name(name) - module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(p)) + if state == 'present': + create_or_update_policy(existing_policy) else: - # Check for existing policy - if p: - # Detach policy - detach_all_entities(module, iam, p) - # Delete Versions - try: - versions = iam.list_policy_versions(PolicyArn=p['Arn'])['Versions'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't list policy versions") - for v in versions: - if not v['IsDefaultVersion']: - try: - iam.delete_policy_version(PolicyArn=p['Arn'], VersionId=v['VersionId']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't delete policy version {0}".format(v['VersionId'])) - # Delete policy - try: - iam.delete_policy(PolicyArn=p['Arn']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(p['PolicyName'])) - - # This is the one case where we will return the old policy - module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p)) - else: - module.exit_json(changed=False, policy=None) -# end main + delete_policy(existing_policy) if __name__ == '__main__': diff --git a/tests/integration/targets/iam_managed_policy/aliases b/tests/integration/targets/iam_managed_policy/aliases new file mode 100644 index 00000000000..839bd014bd7 --- /dev/null +++ 
b/tests/integration/targets/iam_managed_policy/aliases @@ -0,0 +1,6 @@ +# reason: missing-policy +# It's not possible to control what permissions are granted to a policy. +# This makes securely testing iam_policy very difficult +unsupported + +cloud/aws diff --git a/tests/integration/targets/iam_managed_policy/defaults/main.yml b/tests/integration/targets/iam_managed_policy/defaults/main.yml new file mode 100644 index 00000000000..a6edcacefae --- /dev/null +++ b/tests/integration/targets/iam_managed_policy/defaults/main.yml @@ -0,0 +1,2 @@ +--- +policy_name: "{{ resource_prefix }}-policy" diff --git a/tests/integration/targets/iam_managed_policy/tasks/main.yml b/tests/integration/targets/iam_managed_policy/tasks/main.yml new file mode 100644 index 00000000000..f17b7cad096 --- /dev/null +++ b/tests/integration/targets/iam_managed_policy/tasks/main.yml @@ -0,0 +1,160 @@ +--- +- name: "Run integration tests for IAM managed policy" + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + block: + ## Test policy creation + - name: Create IAM managed policy - check mode + iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: "Deny" + Action: "logs:CreateLogGroup" + Resource: "*" + state: present + register: result + check_mode: yes + + - name: Create IAM managed policy - check mode + assert: + that: + - result.changed + + - name: Create IAM managed policy + iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: "Deny" + Action: "logs:CreateLogGroup" + Resource: "*" + state: present + register: result + + - name: Create IAM managed policy + assert: + that: + - result.changed + - result.policy.policy_name == policy_name + + - name: Create IAM managed policy - idempotency check + iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: "Deny" + Action: "logs:CreateLogGroup" + Resource: "*" + state: present + register: result + + - name: Create IAM managed policy - idempotency check + assert: + that: + - not result.changed + + ## Test policy update + - name: Update IAM managed policy - check mode + iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: "Deny" + Action: "logs:Describe*" + Resource: "*" + state: present + register: result + check_mode: yes + + - name: Update IAM managed policy - check mode + assert: + that: + - result.changed + + - name: Update IAM managed policy + iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: "Deny" + Action: "logs:Describe*" + Resource: "*" + state: present + register: result + + - name: Update IAM managed policy + assert: + that: + - result.changed + - result.policy.policy_name == policy_name + + - name: Update IAM managed policy - idempotency check + iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: "Deny" + Action: "logs:Describe*" + Resource: "*" + state: present + register: result + + - name: Update IAM managed policy - idempotency check + assert: + that: + - not result.changed + + ## Test policy deletion + - name: Delete IAM managed policy - check mode + iam_managed_policy: + policy_name: "{{ policy_name }}" + state: absent + 
register: result + check_mode: yes + + - name: Delete IAM managed policy - check mode + assert: + that: + - result.changed + + - name: Delete IAM managed policy + iam_managed_policy: + policy_name: "{{ policy_name }}" + state: absent + register: result + + - name: Delete IAM managed policy + assert: + that: + - result.changed + + - name: Delete IAM managed policy - idempotency check + iam_managed_policy: + policy_name: "{{ policy_name }}" + state: absent + register: result + + - name: Delete IAM managed policy - idempotency check + assert: + that: + - not result.changed + + always: + - name: Delete IAM managed policy + iam_managed_policy: + policy_name: "{{ policy_name }}" + state: absent + ignore_errors: yes From f58525b15943720b8af8a633623e8ffd9f94adc3 Mon Sep 17 00:00:00 2001 From: Mandar Kulkarni Date: Tue, 15 Feb 2022 12:14:29 -0800 Subject: [PATCH 25/31] ec2_asg: enable integration tests, change instance type to a type allowed by policy (#815) ec2_asg: enable integration tests, change instance type to a type allowed by policy Depends-On: ansible/ansible-zuul-jobs#1247 SUMMARY Enable ec2_asg integration tests as fix for resolving ec2_asg failing due to policies missing is merged. Also change the instance type used in couple of assertions to a type which is allowed by the policy to fix failure. ISSUE TYPE Bugfix Pull Request COMPONENT NAME ec2_asg Reviewed-by: Jill R --- tests/integration/targets/ec2_asg/aliases | 3 +- .../targets/ec2_asg/defaults/main.yml | 1 + .../targets/ec2_asg/tasks/main.yml | 86 ++++--------------- 3 files changed, 19 insertions(+), 71 deletions(-) diff --git a/tests/integration/targets/ec2_asg/aliases b/tests/integration/targets/ec2_asg/aliases index a1db4545242..6f8f60998c7 100644 --- a/tests/integration/targets/ec2_asg/aliases +++ b/tests/integration/targets/ec2_asg/aliases @@ -1,6 +1,5 @@ # reason: slow # Tests take around 30 minutes -# reason: broken -disabled +slow cloud/aws diff --git a/tests/integration/targets/ec2_asg/defaults/main.yml b/tests/integration/targets/ec2_asg/defaults/main.yml index 80bf25cd001..be66f56f2d3 100644 --- a/tests/integration/targets/ec2_asg/defaults/main.yml +++ b/tests/integration/targets/ec2_asg/defaults/main.yml @@ -2,3 +2,4 @@ # defaults file for ec2_asg # Amazon Linux 2 AMI 2019.06.12 (HVM), GP2 Volume Type ec2_ami_name: 'amzn2-ami-hvm-2.0.20190612-x86_64-gp2' +load_balancer_name: "{{ tiny_prefix }}-lb" diff --git a/tests/integration/targets/ec2_asg/tasks/main.yml b/tests/integration/targets/ec2_asg/tasks/main.yml index 7f196442904..800c167bde8 100644 --- a/tests/integration/targets/ec2_asg/tasks/main.yml +++ b/tests/integration/targets/ec2_asg/tasks/main.yml @@ -47,15 +47,7 @@ - set_fact: ec2_ami_image: '{{ ec2_amis.images[0].image_id }}' - - name: load balancer name has to be less than 32 characters - # the 8 digit identifier at the end of resource_prefix helps determine during which test something - # was created - set_fact: - load_balancer_name: "{{ item }}-lb" - loop: "{{ resource_prefix | regex_findall('.{8}$') }}" - # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations - - name: Create VPC for use in testing ec2_vpc_net: name: "{{ resource_prefix }}-vpc" @@ -239,8 +231,6 @@ that: - output is not changed - # - name: pause for a bit to make sure that the group can't be trivially deleted - # pause: seconds=30 - name: kill asg ec2_asg: name: "{{ resource_prefix }}-asg" @@ -349,7 +339,6 @@ # ============================================================ # grow scaling group to 3 - - 
name: add 2 more instances wait for instances to be deemed healthy (ELB) ec2_asg: name: "{{ resource_prefix }}-asg" @@ -373,7 +362,6 @@ # ============================================================ # Test max_instance_lifetime option - - name: enable asg max_instance_lifetime ec2_asg: name: "{{ resource_prefix }}-asg" @@ -409,8 +397,7 @@ # ============================================================ - # # perform rolling replace with different launch configuration - + # perform rolling replace with different launch configuration - name: perform rolling update to new AMI ec2_asg: name: "{{ resource_prefix }}-asg" @@ -442,7 +429,6 @@ # ============================================================ # perform rolling replace with the original launch configuration - - name: perform rolling update to new AMI while removing the load balancer ec2_asg: name: "{{ resource_prefix }}-asg" @@ -475,10 +461,6 @@ # ============================================================ # perform rolling replace with new launch configuration and lc_check:false - - # Note - this is done async so we can query asg_facts during - # the execution. Issues #28087 and #35993 result in correct - # end result, but spin up extraneous instances during execution. - name: "perform rolling update to new AMI with lc_check: false" ec2_asg: name: "{{ resource_prefix }}-asg" @@ -496,32 +478,18 @@ lc_check: false wait_timeout: 1800 state: present - async: 1800 - poll: 0 - register: asg_job - - name: get ec2_asg info for 3 minutes + # Collect ec2_asg_info + - name: get ec2_asg info ec2_asg_info: name: "{{ resource_prefix }}-asg" register: output - loop_control: - pause: 15 - loop: "{{ range(12) | list }}" - # Since we started with 3 servers and replace all of them. - # We should see 6 servers total. + # Since we started with 3 instances and replace all of them. + # We should see only 3 instances total. - assert: that: - - output | community.general.json_query(inst_id_json_query) | unique | length == 6 - vars: - inst_id_json_query: results[].results[].instances[].instance_id - - - name: Ensure ec2_asg task completes - async_status: jid="{{ asg_job.ansible_job_id }}" - register: status - until: status is finished - retries: 200 - delay: 15 + - output.results[0].instances | length == 3 # ============================================================ @@ -533,10 +501,6 @@ async: 400 # Create new asg with replace_all_instances and lc_check:false - - # Note - this is done async so we can query asg_facts during - # the execution. Issues #28087 results in correct - # end result, but spin up extraneous instances during execution. 
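For context, the async pattern these removed notes describe looked roughly like the sketch below, reconstructed from the lines this patch deletes and shown for illustration only; the patch replaces it with a single synchronous run followed by one ec2_asg_info check:

    - name: "new asg with lc_check: false (previous async variant)"
      ec2_asg:
        name: "{{ resource_prefix }}-asg"
        # ... same launch configuration and replace_all_instances parameters as the task below ...
        state: present
      async: 1800
      poll: 0
      register: asg_job

    - name: Ensure ec2_asg task completes
      async_status: jid="{{ asg_job.ansible_job_id }}"
      register: status
      until: status is finished
      retries: 200
      delay: 15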
- name: "new asg with lc_check: false" ec2_asg: name: "{{ resource_prefix }}-asg" @@ -554,33 +518,18 @@ lc_check: false wait_timeout: 1800 state: present - async: 1800 - poll: 0 - register: asg_job - # Collect ec2_asg_info for 3 minutes + # Collect ec2_asg_info - name: get ec2_asg information ec2_asg_info: name: "{{ resource_prefix }}-asg" register: output - loop_control: - pause: 15 - loop: "{{ range(12) | list }}" # Get all instance_ids we saw and assert we saw number expected # Should only see 3 (don't replace instances we just created) - assert: that: - - output | community.general.json_query(inst_id_json_query) | unique | length == 3 - vars: - inst_id_json_query: results[].results[].instances[].instance_id - - - name: Ensure ec2_asg task completes - async_status: jid="{{ asg_job.ansible_job_id }}" - register: status - until: status is finished - retries: 200 - delay: 15 + - output.results[0].instances | length == 3 # we need a launch template, otherwise we cannot test the mixed instance policy - name: create launch template for autoscaling group to test its mixed instances policy @@ -610,7 +559,7 @@ mixed_instances_policy: instance_types: - t3.micro - - t3a.micro + - t2.nano wait_for_instances: yes register: output @@ -618,7 +567,7 @@ that: - "output.mixed_instances_policy | length == 2" - "output.mixed_instances_policy[0] == 't3.micro'" - - "output.mixed_instances_policy[1] == 't3a.micro'" + - "output.mixed_instances_policy[1] == 't2.nano'" - name: update autoscaling group with mixed-instances policy with instances_distribution ec2_asg: @@ -633,7 +582,7 @@ mixed_instances_policy: instance_types: - t3.micro - - t3a.micro + - t2.nano instances_distribution: on_demand_percentage_above_base_capacity: 0 spot_allocation_strategy: capacity-optimized @@ -643,7 +592,7 @@ - assert: that: - "output.mixed_instances_policy_full['launch_template']['overrides'][0]['instance_type'] == 't3.micro'" - - "output.mixed_instances_policy_full['launch_template']['overrides'][1]['instance_type'] == 't3a.micro'" + - "output.mixed_instances_policy_full['launch_template']['overrides'][1]['instance_type'] == 't2.nano'" - "output.mixed_instances_policy_full['instances_distribution']['on_demand_percentage_above_base_capacity'] == 0" - "output.mixed_instances_policy_full['instances_distribution']['spot_allocation_strategy'] == 'capacity-optimized'" @@ -683,7 +632,7 @@ - name: update autoscaling group with tg1 ec2_asg: name: "{{ resource_prefix }}-asg" - launch_template: + launch_template: launch_template_name: "{{ resource_prefix }}-lt" target_group_arns: - "{{ out_tg1.target_group_arn }}" @@ -701,7 +650,7 @@ - name: update autoscaling group add tg2 ec2_asg: name: "{{ resource_prefix }}-asg" - launch_template: + launch_template: launch_template_name: "{{ resource_prefix }}-lt" target_group_arns: - "{{ out_tg1.target_group_arn }}" @@ -720,7 +669,7 @@ - name: update autoscaling group remove tg1 ec2_asg: name: "{{ resource_prefix }}-asg" - launch_template: + launch_template: launch_template_name: "{{ resource_prefix }}-lt" target_group_arns: - "{{ out_tg2.target_group_arn }}" @@ -739,7 +688,7 @@ - name: update autoscaling group remove tg2 and add tg1 ec2_asg: name: "{{ resource_prefix }}-asg" - launch_template: + launch_template: launch_template_name: "{{ resource_prefix }}-lt" target_group_arns: - "{{ out_tg1.target_group_arn }}" @@ -758,7 +707,7 @@ - name: target group no change ec2_asg: name: "{{ resource_prefix }}-asg" - launch_template: + launch_template: launch_template_name: "{{ resource_prefix }}-lt" 
target_group_arns: - "{{ out_tg1.target_group_arn }}" @@ -789,7 +738,6 @@ retries: 10 # Remove the testing dependencies - - name: remove target group elb_target_group: name: "{{ item }}" From d0596e3734170873b0166aae01630eb51f880b4f Mon Sep 17 00:00:00 2001 From: Joseph Torcasso <87090265+jatorcasso@users.noreply.github.com> Date: Wed, 16 Feb 2022 12:04:20 -0500 Subject: [PATCH 26/31] Stabilize ec2_eip module (#936) Stabilize ec2_eip module SUMMARY fixed check_mode issues added integration tests for check_mode / idempotency updated json returned when state = absent for clarity removed json_query references fixes #159 Depends-On: ansible-collections/amazon.aws#672 ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_eip Reviewed-by: Mark Woolley Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis Reviewed-by: Joseph Torcasso Reviewed-by: Jill R --- .../fragments/936-stabilize-ec2-eip.yml | 2 + plugins/modules/ec2_eip.py | 54 +- plugins/modules/ec2_eip_info.py | 2 +- tests/integration/targets/ec2_eip/aliases | 3 +- .../targets/ec2_eip/tasks/main.yml | 2298 ++++++++++------- 5 files changed, 1424 insertions(+), 935 deletions(-) create mode 100644 changelogs/fragments/936-stabilize-ec2-eip.yml diff --git a/changelogs/fragments/936-stabilize-ec2-eip.yml b/changelogs/fragments/936-stabilize-ec2-eip.yml new file mode 100644 index 00000000000..eec8e2cf49c --- /dev/null +++ b/changelogs/fragments/936-stabilize-ec2-eip.yml @@ -0,0 +1,2 @@ +minor_changes: + - ec2_eip - refactor module by fixing check_mode and more clear return obj. added integration tests (https://github.com/ansible-collections/community.aws/pull/936) \ No newline at end of file diff --git a/plugins/modules/ec2_eip.py b/plugins/modules/ec2_eip.py index ca883e5f715..e0031eaf10a 100644 --- a/plugins/modules/ec2_eip.py +++ b/plugins/modules/ec2_eip.py @@ -27,8 +27,8 @@ public_ip: description: - The IP address of a previously allocated EIP. - - When I(public_ip=present) and device is specified, the EIP is associated with the device. - - When I(public_ip=absent) and device is specified, the EIP is disassociated from the device. + - When I(state=present) and device is specified, the EIP is associated with the device. + - When I(state=absent) and device is specified, the EIP is disassociated from the device. aliases: [ ip ] type: str state: @@ -328,7 +328,7 @@ def find_address(ec2, module, public_ip, device_id, is_instance=True): except is_boto3_error_code('InvalidAddress.NotFound') as e: # If we're releasing and we can't find it, it's already gone... 
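With the reworked return values, state=absent now reports plain booleans for disassociated and released alongside changed, so callers can assert on them directly; a minimal sketch of the release case, mirroring the integration tests added further below:

    - name: Release eip
      ec2_eip:
        state: absent
        public_ip: '{{ eip.public_ip }}'
      register: eip_release

    - assert:
        that:
          - eip_release.changed
          - not eip_release.disassociated   # nothing was attached, so no disassociation
          - eip_release.released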
if module.params.get('state') == 'absent': - module.exit_json(changed=False) + module.exit_json(changed=False, disassociated=False, released=False) module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") addresses = addresses["Addresses"] @@ -385,6 +385,8 @@ def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True try: + if check_mode: + return None, True result = ec2.allocate_address(Domain=domain, aws_retry=True), True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address") @@ -493,8 +495,11 @@ def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True) def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool): - # type: (EC2Connection, str, bool, str) -> Address + # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address """ Overrides botocore's allocate_address function to support BYOIP """ + if check_mode: + return None + params = {} if domain is not None: @@ -503,9 +508,6 @@ def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool if public_ipv4_pool is not None: params['PublicIpv4Pool'] = public_ipv4_pool - if check_mode: - params['DryRun'] = 'true' - try: result = ec2.allocate_address(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -606,19 +608,33 @@ def main(): reuse_existing_ip_allowed, allow_reassociation, module.check_mode, is_instance=is_instance ) + if 'allocation_id' not in result: + # Don't check tags on check_mode here - no EIP to pass through + module.exit_json(**result) else: if address: - changed = False + result = { + 'changed': False, + 'public_ip': address['PublicIp'], + 'allocation_id': address['AllocationId'] + } else: address, changed = allocate_address( ec2, module, domain, reuse_existing_ip_allowed, module.check_mode, tag_dict, public_ipv4_pool ) - result = { - 'changed': changed, - 'public_ip': address['PublicIp'], - 'allocation_id': address['AllocationId'] - } + if address: + result = { + 'changed': changed, + 'public_ip': address['PublicIp'], + 'allocation_id': address['AllocationId'] + } + else: + # Don't check tags on check_mode here - no EIP to pass through + result = { + 'changed': changed + } + module.exit_json(**result) result['changed'] |= ensure_ec2_tags( ec2, module, result['allocation_id'], @@ -633,21 +649,21 @@ def main(): released = release_address(ec2, module, address, module.check_mode) result = { 'changed': True, - 'disassociated': disassociated, - 'released': released + 'disassociated': disassociated['changed'], + 'released': released['changed'] } else: result = { 'changed': disassociated['changed'], - 'disassociated': disassociated, - 'released': {'changed': False} + 'disassociated': disassociated['changed'], + 'released': False } else: released = release_address(ec2, module, address, module.check_mode) result = { 'changed': released['changed'], - 'disassociated': {'changed': False}, - 'released': released + 'disassociated': False, + 'released': released['changed'] } except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: diff --git a/plugins/modules/ec2_eip_info.py b/plugins/modules/ec2_eip_info.py index 4f560429e12..31d8145742b 100644 --- a/plugins/modules/ec2_eip_info.py +++ b/plugins/modules/ec2_eip_info.py @@ -44,7 +44,7 @@ register: 
my_vm_eips - ansible.builtin.debug: - msg: "{{ my_vm_eips.addresses | json_query(\"[?private_ip_address=='10.0.0.5']\") }}" + msg: "{{ my_vm_eips.addresses | selectattr('private_ip_address', 'equalto', '10.0.0.5') }}" - name: List all EIP addresses for several VMs. community.aws.ec2_eip_info: diff --git a/tests/integration/targets/ec2_eip/aliases b/tests/integration/targets/ec2_eip/aliases index f396da939f0..78305e98909 100644 --- a/tests/integration/targets/ec2_eip/aliases +++ b/tests/integration/targets/ec2_eip/aliases @@ -1,4 +1,5 @@ # https://github.com/ansible-collections/community.aws/issues/159 -unstable +# unstable cloud/aws +ec2_eip_info \ No newline at end of file diff --git a/tests/integration/targets/ec2_eip/tasks/main.yml b/tests/integration/targets/ec2_eip/tasks/main.yml index 66e2eb5b4ba..9b93f107e2d 100644 --- a/tests/integration/targets/ec2_eip/tasks/main.yml +++ b/tests/integration/targets/ec2_eip/tasks/main.yml @@ -1,4 +1,7 @@ - name: Integration testing for ec2_eip + collections: + - amazon.aws + module_defaults: group/aws: aws_access_key: '{{ aws_access_key }}' @@ -7,922 +10,1389 @@ region: '{{ aws_region }}' ec2_eip: in_vpc: true - collections: - - amazon.aws + block: - # ===================================================== - - name: Get the current caller identity facts - aws_caller_info: null - register: caller_info - - name: list available AZs - aws_az_info: null - register: region_azs - - name: create a VPC - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' - state: present - cidr_block: '{{ vpc_cidr }}' - tags: - AnsibleEIPTest: Pending - AnsibleEIPTestPrefix: '{{ resource_prefix }}' - register: vpc_result - - name: create subnet - ec2_vpc_subnet: - cidr: '{{ subnet_cidr }}' - az: '{{ subnet_az }}' - vpc_id: '{{ vpc_result.vpc.id }}' - state: present - register: vpc_subnet_create - - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - register: vpc_igw - - name: "create a security group" - ec2_group: - state: present - name: '{{ resource_prefix }}-sg' - description: a security group for ansible tests - vpc_id: '{{ vpc_result.vpc.id }}' - rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - register: security_group - - name: Create instance for attaching - ec2_instance: - name: '{{ resource_prefix }}-instance' - image_id: '{{ ec2_ami_id }}' - security_group: '{{ security_group.group_id }}' - vpc_subnet_id: '{{ vpc_subnet_create.subnet.id }}' - wait: yes - state: running - register: create_ec2_instance_result - - # ===================================================== - - name: Look for signs of concurrent EIP tests. Pause if they are running or their prefix comes before ours. 
- vars: - running_query: vpcs[?tags.AnsibleEIPTest=='Running'] - pending_query: vpcs[?tags.AnsibleEIPTest=='Pending'].tags.AnsibleEIPTestPrefix - ec2_vpc_net_info: - filters: - tag:AnsibleEIPTest: - - Pending - - Running - register: vpc_info - retries: 120 - delay: 5 - until: - - ( vpc_info | community.general.json_query(running_query) | length == 0 ) - - ( vpc_info | community.general.json_query(pending_query) | sort | first == resource_prefix ) - - name: Make a crude lock - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' - state: present - cidr_block: '{{ vpc_cidr }}' - tags: - AnsibleEIPTest: Running - AnsibleEIPTestPrefix: '{{ resource_prefix }}' - - # ===================================================== - - name: Get current state of EIPs - ec2_eip_info: null - register: eip_info_start - - name: Require that there are no free IPs when we start, otherwise we can't test things properly - assert: - that: - - eip_info_start is defined - - '"addresses" in eip_info_start' - - ( eip_info_start.addresses | length ) == ( eip_info_start | community.general.json_query("addresses[].association_id") | length ) - - - name: Allocate a new eip (no conditions) - ec2_eip: - state: present - tags: - AnsibleEIPTestPrefix: '{{ resource_prefix }}' - register: eip - - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip is defined - - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) - - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - - assert: - that: - - '"addresses" in eip_info' - - eip_info.addresses | length == 1 - - eip_info.addresses[0].allocation_id == eip.allocation_id - - eip_info.addresses[0].domain == "vpc" - - eip_info.addresses[0].public_ip == eip.public_ip - - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix - - - ec2_eip_info: - filters: - allocation-id: '{{ eip.allocation_id }}' - - assert: - that: - - '"addresses" in eip_info' - - eip_info.addresses | length == 1 - - eip_info.addresses[0].allocation_id == eip.allocation_id - - eip_info.addresses[0].domain == "vpc" - - eip_info.addresses[0].public_ip == eip.public_ip - - - name: Release eip - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip_release is defined - - eip_release is changed - - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) - - - name: Allocate a new eip - attempt reusing unallocated ones (none available) - ec2_eip: - state: present - reuse_existing_ip_allowed: true - register: eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip is defined - - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) - - - name: Re-Allocate a new eip - attempt reusing unallocated ones (one available) - ec2_eip: - state: present - reuse_existing_ip_allowed: true - register: reallocate_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - reallocate_eip is defined - - reallocate_eip is not changed - - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | 
ansible.utils.ipaddr ) - - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) - - - name: Release eip - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - - ec2_eip_info: null - register: eip_info - - assert: - that: - - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) - - eip_release is defined - - eip_release is changed - - - name: Allocate a new eip - ec2_eip: - state: present - register: eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip is defined - - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) - - - name: Match an existing eip (changed == false) - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - register: reallocate_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - reallocate_eip is defined - - reallocate_eip is not changed - - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr ) - - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) - - - name: Release eip - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip_release is defined - - eip_release is changed - - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) - - - name: Allocate a new eip (no tags) - ec2_eip: - state: present - register: eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip is defined - - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) - - - name: attempt reusing an existing eip with a tag (No match available) - ec2_eip: - state: present - reuse_existing_ip_allowed: true - tag_name: Team - register: no_tagged_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - no_tagged_eip is defined - - no_tagged_eip is changed - - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ansible.utils.ipaddr ) - - no_tagged_eip.allocation_id is defined and no_tagged_eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length ) - - - name: tag eip so we can try matching it - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - Team: Frontend + - name: Get the current caller identity facts + aws_caller_info: + register: caller_info - - name: attempt reusing an existing eip with a tag (Match available) - ec2_eip: - state: present - reuse_existing_ip_allowed: true - tag_name: Team - register: reallocate_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - reallocate_eip is defined - - reallocate_eip is not changed - - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr ) - - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 2 == ( 
eip_info.addresses | length ) - - - name: attempt reusing an existing eip with a tag and it's value (no match available) - ec2_eip: - state: present - reuse_existing_ip_allowed: true - tag_name: Team - tag_value: Backend - register: backend_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - backend_eip is defined - - backend_eip is changed - - backend_eip.public_ip is defined and ( backend_eip.public_ip | ansible.utils.ipaddr ) - - backend_eip.allocation_id is defined and backend_eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length ) - - - name: tag eip so we can try matching it - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - Team: Backend + - name: List available AZs + aws_az_info: + register: region_azs - - name: attempt reusing an existing eip with a tag and it's value (match available) - ec2_eip: - state: present - reuse_existing_ip_allowed: true - tag_name: Team - tag_value: Backend - register: reallocate_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - reallocate_eip is defined - - reallocate_eip is not changed - - reallocate_eip.public_ip is defined and reallocate_eip.public_ip != "" - - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id != "" - - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length ) - - - name: Release backend_eip - ec2_eip: - state: absent - public_ip: '{{ backend_eip.public_ip }}' - register: eip_release - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip_release is defined - - eip_release is changed - - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length ) - - - name: Release no_tagged_eip - ec2_eip: - state: absent - public_ip: '{{ no_tagged_eip.public_ip }}' - register: eip_release - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip_release is defined - - eip_release is changed - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) - - - name: Release eip - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip_release is defined - - eip_release is changed - - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) - - - name: allocate a new eip from a pool - ec2_eip: - state: present - public_ipv4_pool: amazon - register: eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip is defined - - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) - - - name: create ENI A - ec2_eni: - subnet_id: '{{ vpc_subnet_create.subnet.id }}' - register: eni_create_a - - - name: create ENI B - ec2_eni: - subnet_id: '{{ vpc_subnet_create.subnet.id }}' - register: eni_create_b - - - name: Attach EIP to ENI A - ec2_eip: - public_ip: '{{ eip.public_ip }}' - device_id: '{{ eni_create_a.interface.id }}' - register: associate_eip - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - assert: - that: - - associate_eip is defined - - associate_eip is changed - - eip_info.addresses | length == 1 - - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip - - associate_eip.allocation_id is defined and eip.allocation_id == 
associate_eip.allocation_id - - eip_info.addresses[0].allocation_id == eip.allocation_id - - eip_info.addresses[0].domain == "vpc" - - eip_info.addresses[0].public_ip == eip.public_ip - - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") - - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id - - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) - - eip_info.addresses[0].network_interface_owner_id == caller_info.account - - - name: Re-Attach EIP to ENI A (no change) - ec2_eip: - public_ip: '{{ eip.public_ip }}' - device_id: '{{ eni_create_a.interface.id }}' - register: associate_eip - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - assert: - that: - - associate_eip is defined - - associate_eip is not changed - - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip - - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id - - eip_info.addresses | length == 1 - - eip_info.addresses[0].allocation_id == eip.allocation_id - - eip_info.addresses[0].domain == "vpc" - - eip_info.addresses[0].public_ip == eip.public_ip - - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") - - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id - - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) - - - name: Attach EIP to ENI B (should fail, already associated) - ec2_eip: - public_ip: '{{ eip.public_ip }}' - device_id: '{{ eni_create_b.interface.id }}' - register: associate_eip - ignore_errors: true - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - assert: - that: - - associate_eip is defined - - associate_eip is failed - - eip_info.addresses | length == 1 - - eip_info.addresses[0].allocation_id == eip.allocation_id - - eip_info.addresses[0].domain == "vpc" - - eip_info.addresses[0].public_ip == eip.public_ip - - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") - - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id - - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) - - - name: Attach EIP to ENI B - ec2_eip: - public_ip: '{{ eip.public_ip }}' - device_id: '{{ eni_create_b.interface.id }}' - allow_reassociation: true - register: associate_eip - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - assert: - that: - - associate_eip is defined - - associate_eip is changed - - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip - - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id - - eip_info.addresses | length == 1 - - eip_info.addresses[0].allocation_id == eip.allocation_id - - eip_info.addresses[0].domain == "vpc" - - eip_info.addresses[0].public_ip == eip.public_ip - - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") - - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id - - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) - - - name: Detach EIP from 
ENI B, without enabling release on disassociation - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - device_id: '{{ eni_create_b.interface.id }}' - register: disassociate_eip - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - assert: - that: - - associate_eip is defined - - associate_eip is changed - - eip_info.addresses | length == 1 - - - name: Re-detach EIP from ENI B, without enabling release on disassociation - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - device_id: '{{ eni_create_b.interface.id }}' - register: associate_eip - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - assert: - that: - - associate_eip is defined - - associate_eip is not changed - - eip_info.addresses | length == 1 - - - name: Attach EIP to ENI A - ec2_eip: - public_ip: '{{ eip.public_ip }}' - device_id: '{{ eni_create_a.interface.id }}' - register: associate_eip - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - assert: - that: - - associate_eip is defined - - associate_eip is changed - - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip - - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id - - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id - - - name: Detach EIP from ENI A, enabling release on disassociation - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - device_id: '{{ eni_create_a.interface.id }}' - release_on_disassociation: true - register: disassociate_eip - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - assert: - that: - - associate_eip is defined - - associate_eip is changed - - eip_info.addresses | length == 0 - - - name: Re-detach EIP from ENI A, enabling release on disassociation - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - device_id: '{{ eni_create_a.interface.id }}' - release_on_disassociation: true - register: associate_eip - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - assert: - that: - - associate_eip is defined - - associate_eip is not changed - - eip_info.addresses | length == 0 - - - ec2_eip_info: null - register: eip_info - - assert: - that: - - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) - - - name: Cleanup ENI B - ec2_eni: - state: absent - eni_id: '{{ eni_create_b.interface.id }}' - - - name: Cleanup ENI A - ec2_eni: - state: absent - eni_id: '{{ eni_create_a.interface.id }}' - - - name: Attach eip to an EC2 instance - ec2_eip: - device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' - state: present - release_on_disassociation: yes - register: instance_eip - - ec2_eip_info: - filters: - public-ip: '{{ instance_eip.public_ip }}' - register: eip_info - - assert: - that: - - instance_eip is success - - eip_info.addresses[0].allocation_id is defined - - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] }}' - - - name: Attach eip to an EC2 instance with private Ip specified - ec2_eip: - device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' - private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address }}' - state: present - release_on_disassociation: yes - register: instance_eip - - ec2_eip_info: - filters: - public-ip: '{{ instance_eip.public_ip }}' - register: eip_info - - assert: - that: - - instance_eip is success - - 
eip_info.addresses[0].allocation_id is defined - - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] }}' - - # ===================================================== - - - name: Cleanup instance - ec2_instance: - instance_ids: '{{ create_ec2_instance_result.instance_ids }}' - state: absent - - - name: Cleanup instance eip - ec2_eip: - state: absent - public_ip: '{{ instance_eip.public_ip }}' - register: eip_cleanup - retries: 5 - delay: 5 - until: eip_cleanup is successful - - - name: Cleanup IGW - ec2_vpc_igw: - state: absent - vpc_id: '{{ vpc_result.vpc.id }}' - register: vpc_igw - - - name: Cleanup security group - ec2_group: - state: absent - name: '{{ resource_prefix }}-sg' - - - name: Cleanup Subnet - ec2_vpc_subnet: - state: absent - cidr: '{{ subnet_cidr }}' - vpc_id: '{{ vpc_result.vpc.id }}' - - - name: Release eip - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - ignore_errors: true + - name: Create a VPC + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + state: present + cidr_block: '{{ vpc_cidr }}' + tags: + AnsibleEIPTest: Pending + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + register: vpc_result + + - name: Look for signs of concurrent EIP tests. Pause if they are running or their prefix comes before ours. + vars: + running_query: vpcs[?tags.AnsibleEIPTest=='Running'] + pending_query: vpcs[?tags.AnsibleEIPTest=='Pending'].tags.AnsibleEIPTestPrefix + ec2_vpc_net_info: + filters: + tag:AnsibleEIPTest: + - Pending + - Running + register: vpc_info + retries: 10 + delay: 5 + until: + - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto', 'Running') | length == 0 ) + - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto', 'Pending') | map(attribute='AnsibleEIPTestPrefix') | sort | first == resource_prefix ) + + - name: Create subnet + ec2_vpc_subnet: + cidr: '{{ subnet_cidr }}' + az: '{{ subnet_az }}' + vpc_id: '{{ vpc_result.vpc.id }}' + state: present + register: vpc_subnet_create + + - name: Create internet gateway + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: '{{ vpc_result.vpc.id }}' + register: vpc_igw + + - name: Create security group + ec2_group: + state: present + name: '{{ resource_prefix }}-sg' + description: a security group for ansible tests + vpc_id: '{{ vpc_result.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + register: security_group + + - name: Create instance for attaching + ec2_instance: + name: '{{ resource_prefix }}-instance' + image_id: '{{ ec2_ami_id }}' + security_group: '{{ security_group.group_id }}' + vpc_subnet_id: '{{ vpc_subnet_create.subnet.id }}' + wait: yes + state: running + register: create_ec2_instance_result + + - name: Create ENI A + ec2_eni: + subnet_id: '{{ vpc_subnet_create.subnet.id }}' + register: eni_create_a + + - name: Create ENI B + ec2_eni: + subnet_id: '{{ vpc_subnet_create.subnet.id }}' + register: eni_create_b + + - name: Make a crude lock + ec2_vpc_net: + name: '{{ resource_prefix }}-vpc' + state: present + cidr_block: '{{ vpc_cidr }}' + tags: + AnsibleEIPTest: Running + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + + - name: Get current state of EIPs + ec2_eip_info: + register: eip_info_start + + - name: Require that there are no free IPs when we start, otherwise we can't test things properly + assert: + that: + - '"addresses" in eip_info_start' + - ( eip_info_start.addresses | length ) == ( eip_info_start.addresses | select('match', 
'association_id') | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Allocate a new EIP with no conditions - check_mode + ec2_eip: + state: present + tags: + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + register: eip + check_mode: yes + + - assert: + that: + - eip is changed + + - name: Allocate a new EIP with no conditions + ec2_eip: + state: present + tags: + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + register: eip + + - ec2_eip_info: + register: eip_info + check_mode: yes + + - assert: + that: + - eip is changed + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) + - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + - name: Get EIP info via public ip + ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - '"addresses" in eip_info' + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + + - name: Get EIP info via allocation id + ec2_eip_info: + filters: + allocation-id: '{{ eip.allocation_id }}' + register: eip_info + + - assert: + that: + - '"addresses" in eip_info' + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + + - name: Allocate a new ip (idempotence) - check_mode + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + register: eip + check_mode: yes + + - assert: + that: + - eip is not changed + + - name: Allocate a new ip (idempotence) + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + register: eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - eip is not changed + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) + - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Release EIP - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + register: eip_release + check_mode: yes + + - assert: + that: + - eip_release.changed + + - name: Release eip + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + register: eip_release + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - eip_release.changed + - not eip_release.disassociated + - eip_release.released + - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) + + - name: Release EIP (idempotence) - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + register: eip_release + check_mode: yes + + - assert: + that: + - eip_release is not changed + + - name: Release EIP (idempotence) + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + register: eip_release + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - not eip_release.changed + - not 
eip_release.disassociated + - not eip_release.released + - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Allocate a new EIP - attempt reusing unallocated ones (none available) - check_mode + ec2_eip: + state: present + reuse_existing_ip_allowed: true + register: eip + check_mode: yes + + - assert: + that: + - eip is changed + + - name: Allocate a new EIP - attempt reusing unallocated ones (none available) + ec2_eip: + state: present + reuse_existing_ip_allowed: true + register: eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - eip is changed + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) + - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available) - check_mode + ec2_eip: + state: present + reuse_existing_ip_allowed: true + register: reallocate_eip + check_mode: yes + + - assert: + that: + - reallocate_eip is not changed + + - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available) + ec2_eip: + state: present + reuse_existing_ip_allowed: true + register: reallocate_eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - reallocate_eip is not changed + - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr ) + - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + # ------------------------------------------------------------------------------------------ + + - name: attempt reusing an existing EIP with a tag (No match available) - check_mode + ec2_eip: + state: present + reuse_existing_ip_allowed: true + tag_name: Team + register: no_tagged_eip + check_mode: yes + + - assert: + that: + - no_tagged_eip is changed + + - name: attempt reusing an existing EIP with a tag (No match available) + ec2_eip: + state: present + reuse_existing_ip_allowed: true + tag_name: Team + register: no_tagged_eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - no_tagged_eip is changed + - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ansible.utils.ipaddr ) + - no_tagged_eip.allocation_id is defined and no_tagged_eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Tag EIP so we can try matching it + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + Team: Frontend + + - name: Attempt reusing an existing EIP with a tag (Match available) - check_mode + ec2_eip: + state: present + reuse_existing_ip_allowed: true + tag_name: Team + register: reallocate_eip + check_mode: yes + + - assert: + that: + - reallocate_eip is not changed + + - name: Attempt reusing an existing EIP with a tag (Match available) + ec2_eip: + state: present + reuse_existing_ip_allowed: true + tag_name: Team + register: reallocate_eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - reallocate_eip is not changed + - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr ) + - 
reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length ) + + - name: Attempt reusing an existing EIP with a tag and it's value (no match available) - check_mode + ec2_eip: + state: present + reuse_existing_ip_allowed: true + tag_name: Team + tag_value: Backend + register: backend_eip + check_mode: yes + + - assert: + that: + - backend_eip is changed + + - name: Attempt reusing an existing EIP with a tag and it's value (no match available) + ec2_eip: + state: present + reuse_existing_ip_allowed: true + tag_name: Team + tag_value: Backend + register: backend_eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - backend_eip is changed + - backend_eip.public_ip is defined and ( backend_eip.public_ip | ansible.utils.ipaddr ) + - backend_eip.allocation_id is defined and backend_eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Tag EIP so we can try matching it + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + Team: Backend + + - name: Attempt reusing an existing EIP with a tag and it's value (match available) - check_mode + ec2_eip: + state: present + reuse_existing_ip_allowed: true + tag_name: Team + tag_value: Backend + register: reallocate_eip + check_mode: yes + + - assert: + that: + - reallocate_eip is not changed + + - name: Attempt reusing an existing EIP with a tag and it's value (match available) + ec2_eip: + state: present + reuse_existing_ip_allowed: true + tag_name: Team + tag_value: Backend + register: reallocate_eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - reallocate_eip is not changed + - reallocate_eip.public_ip is defined and reallocate_eip.public_ip != "" + - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id != "" + - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length ) + + - name: Release backend_eip + ec2_eip: + state: absent + public_ip: '{{ backend_eip.public_ip }}' + + - name: Release no_tagged_eip + ec2_eip: + state: absent + public_ip: '{{ no_tagged_eip.public_ip }}' + + - name: Release eip + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Allocate a new EIP from a pool - check_mode + ec2_eip: + state: present + public_ipv4_pool: amazon + register: eip + check_mode: yes + + - assert: + that: + - eip is changed + + - name: Allocate a new EIP from a pool + ec2_eip: + state: present + public_ipv4_pool: amazon + register: eip + + - ec2_eip_info: + register: eip_info + + - assert: + that: + - eip is changed + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) + - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Attach EIP to ENI A - check_mode + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + register: associate_eip + check_mode: yes + 
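+      # check_mode: yes means the association is not actually created; the
+      # assert below only verifies that the module reports it as a change.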
+ - assert: + that: + - associate_eip is changed + + - name: Attach EIP to ENI A + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + register: associate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - associate_eip is changed + - eip_info.addresses | length == 1 + - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip + - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") + - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) + - eip_info.addresses[0].network_interface_owner_id == caller_info.account + + - name: Attach EIP to ENI A (idempotence) - check_mode + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + register: associate_eip + check_mode: yes + + - assert: + that: + - associate_eip is not changed + + - name: Attach EIP to ENI A (idempotence) + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + register: associate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - associate_eip is not changed + - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip + - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") + - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) + + # ------------------------------------------------------------------------------------------ + + - name: Attach EIP to ENI B (should fail, already associated) + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: associate_eip + ignore_errors: true + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - associate_eip is failed + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") + - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) + + - name: Attach EIP to ENI B - check_mode + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + allow_reassociation: true + register: associate_eip + check_mode: yes + + - assert: + that: + - 
associate_eip is changed + + - name: Attach EIP to ENI B + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + allow_reassociation: true + register: associate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - associate_eip is changed + - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip + - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") + - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) + + - name: Attach EIP to ENI B (idempotence) - check_mode + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + allow_reassociation: true + register: associate_eip + check_mode: yes + + - assert: + that: + - associate_eip is not changed + + - name: Attach EIP to ENI B (idempotence) + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + allow_reassociation: true + register: associate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - associate_eip is not changed + - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip + - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-") + - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id + - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr ) + + # ------------------------------------------------------------------------------------------ + + - name: Detach EIP from ENI B, without enabling release on disassociation - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is changed + + - name: Detach EIP from ENI B, without enabling release on disassociation + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: disassociate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - disassociate_eip.changed + - disassociate_eip.disassociated + - not disassociate_eip.released + - eip_info.addresses | length == 1 + + - name: Detach EIP from ENI B, without enabling release on disassociation (idempotence) - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is not changed + + - name: Detach EIP from ENI 
B, without enabling release on disassociation (idempotence) + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_b.interface.id }}' + register: disassociate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - not disassociate_eip.changed + - not disassociate_eip.disassociated + - not disassociate_eip.released + - eip_info.addresses | length == 1 + + # ------------------------------------------------------------------------------------------ + + - name: Attach EIP to ENI A + ec2_eip: + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + register: associate_eip + + - name: Detach EIP from ENI A, enabling release on disassociation - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + release_on_disassociation: true + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is changed + + - name: Detach EIP from ENI A, enabling release on disassociation + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + release_on_disassociation: true + register: disassociate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - disassociate_eip.changed + - disassociate_eip.disassociated + - disassociate_eip.released + - eip_info.addresses | length == 0 + + - name: Detach EIP from ENI A, enabling release on disassociation (idempotence) - check_mode + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + release_on_disassociation: true + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is not changed + + - name: Detach EIP from ENI A, enabling release on disassociation (idempotence) + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + device_id: '{{ eni_create_a.interface.id }}' + release_on_disassociation: true + register: disassociate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - not disassociate_eip.changed + - not disassociate_eip.disassociated + - not disassociate_eip.released + - eip_info.addresses | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Attach EIP to an EC2 instance - check_mode + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + state: present + release_on_disassociation: yes + register: instance_eip + check_mode: yes + + - assert: + that: + - instance_eip is changed + + - name: Attach EIP to an EC2 instance + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + state: present + release_on_disassociation: yes + register: instance_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - instance_eip is changed + - eip_info.addresses[0].allocation_id is defined + - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] }}' + + - name: Attach EIP to an EC2 instance (idempotence) - check_mode + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + state: present + release_on_disassociation: yes + register: instance_eip + check_mode: yes + + - assert: + that: + - instance_eip is not changed + + - name: Attach EIP to an EC2 instance 
(idempotence) + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + state: present + release_on_disassociation: yes + register: instance_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - instance_eip is not changed + - eip_info.addresses[0].allocation_id is defined + - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] }}' + + # ------------------------------------------------------------------------------------------ + + - name: Detach EIP from EC2 instance, without enabling release on disassociation - check_mode + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + register: detach_eip + check_mode: yes + + - assert: + that: + - detach_eip is changed + + - name: Detach EIP from EC2 instance, without enabling release on disassociation + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + register: detach_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - detach_eip.changed + - detach_eip.disassociated + - not detach_eip.released + - eip_info.addresses | length == 1 + + - name: Detach EIP from EC2 instance, without enabling release on disassociation (idempotence) - check_mode + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + register: detach_eip + check_mode: yes + + - assert: + that: + - detach_eip is not changed + + - name: Detach EIP from EC2 instance, without enabling release on disassociation (idempotence) + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + register: detach_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - not detach_eip.changed + - not detach_eip.disassociated + - not detach_eip.released + - eip_info.addresses | length == 1 + + - name: Release EIP + ec2_eip: + state: absent + public_ip: '{{ instance_eip.public_ip }}' + + # ------------------------------------------------------------------------------------------ + + - name: Attach EIP to an EC2 instance with private IP specified - check_mode + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address }}' + state: present + release_on_disassociation: yes + register: instance_eip + check_mode: yes + + - assert: + that: + - instance_eip is changed + + - name: Attach EIP to an EC2 instance with private IP specified + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address }}' + state: present + release_on_disassociation: yes + register: instance_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - instance_eip is changed + - eip_info.addresses[0].allocation_id is defined + - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] }}' + + - name: Attach EIP to an EC2 instance with private IP specified (idempotence) - check_mode + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address }}' + state: present + release_on_disassociation: yes + register: instance_eip + check_mode: yes + + - 
assert: + that: + - instance_eip is not changed + + - name: Attach EIP to an EC2 instance with private IP specified (idempotence) + ec2_eip: + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address }}' + state: present + release_on_disassociation: yes + register: instance_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - instance_eip is not changed + - eip_info.addresses[0].allocation_id is defined + - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0] }}' + + # ------------------------------------------------------------------------------------------ + + - name: Detach EIP from EC2 instance, enabling release on disassociation - check_mode + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + release_on_disassociation: yes + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is changed + + - name: Detach EIP from EC2 instance, enabling release on disassociation + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + release_on_disassociation: yes + register: disassociate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - disassociate_eip.changed + - disassociate_eip.disassociated + - disassociate_eip.released + - eip_info.addresses | length == 0 + + - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence) - check_mode + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + release_on_disassociation: yes + register: disassociate_eip + check_mode: yes + + - assert: + that: + - disassociate_eip is not changed + + - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence) + ec2_eip: + state: absent + device_id: '{{ create_ec2_instance_result.instance_ids[0] }}' + release_on_disassociation: yes + register: disassociate_eip + + - ec2_eip_info: + filters: + public-ip: '{{ instance_eip.public_ip }}' + register: eip_info + + - assert: + that: + - not disassociate_eip.changed + - not disassociate_eip.disassociated + - not disassociate_eip.released + - eip_info.addresses | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Allocate a new eip + ec2_eip: + state: present + register: eip + + - name: Tag EIP - check_mode + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + another_tag: 'another Value {{ resource_prefix }}' + register: tag_eip + check_mode: yes + + - assert: + that: + - tag_eip is changed + + - name: Tag EIP + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + another_tag: 'another Value {{ resource_prefix }}' + register: tag_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - tag_eip is changed + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - '"another_tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + - name: 
Tag EIP (idempotence) - check_mode + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + another_tag: 'another Value {{ resource_prefix }}' + register: tag_eip + check_mode: yes + + - assert: + that: + - tag_eip is not changed + + - name: Tag EIP (idempotence) + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + AnsibleEIPTestPrefix: '{{ resource_prefix }}' + another_tag: 'another Value {{ resource_prefix }}' + register: tag_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - tag_eip is not changed + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - '"another_tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + # ------------------------------------------------------------------------------------------ + + - name: Add another Tag - check_mode + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + "third tag": 'Third tag - {{ resource_prefix }}' + purge_tags: False + register: tag_eip + check_mode: yes + + - assert: + that: + - tag_eip is changed + + - name: Add another Tag + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + "third tag": 'Third tag - {{ resource_prefix }}' + purge_tags: False + register: tag_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - tag_eip is changed + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - '"another_tag" in eip_info.addresses[0].tags' + - '"third tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix + - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + - name: Add another Tag (idempotence) - check_mode + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + "third tag": 'Third tag - {{ resource_prefix }}' + purge_tags: False + register: tag_eip + check_mode: yes + + - assert: + that: + - tag_eip is not changed + + - name: Add another Tag (idempotence) + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + "third tag": 'Third tag - {{ resource_prefix }}' + purge_tags: False + register: tag_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - tag_eip is not changed + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - '"another_tag" in eip_info.addresses[0].tags' + - '"third tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix + - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix + + # ------------------------------------------------------------------------------------------ + + - name: Purge tags - check_mode + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + "third tag": 'Third tag - {{ resource_prefix }}' + purge_tags: True + register: tag_eip + check_mode: yes + + - assert: + that: + - tag_eip is changed + + - name: 
Purge tags + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + "third tag": 'Third tag - {{ resource_prefix }}' + purge_tags: True + register: tag_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - tag_eip is changed + - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags' + - '"another_tag" not in eip_info.addresses[0].tags' + - '"third tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix + + - name: Purge tags (idempotence) - check_mode + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + "third tag": 'Third tag - {{ resource_prefix }}' + purge_tags: True + register: tag_eip + check_mode: yes + + - assert: + that: + - tag_eip is not changed + + - name: Purge tags (idempotence) + ec2_eip: + state: present + public_ip: '{{ eip.public_ip }}' + tags: + "third tag": 'Third tag - {{ resource_prefix }}' + purge_tags: True + register: tag_eip + + - ec2_eip_info: + filters: + public-ip: '{{ eip.public_ip }}' + register: eip_info + + - assert: + that: + - tag_eip is not changed + - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags' + - '"another_tag" not in eip_info.addresses[0].tags' + - '"third tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix + + # ----- Cleanup ------------------------------------------------------------------------------ - - name: allocate a new eip - ec2_eip: - state: present - register: eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip is defined - - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) - - ############################################################################################# - - - name: Tag EIP - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - AnsibleEIPTestPrefix: '{{ resource_prefix }}' - another_tag: 'another Value {{ resource_prefix }}' - register: tag_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - tag_eip is defined - - tag_eip is changed - - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' - - '"another_tag" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix - - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix - - - name: Tag EIP - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - AnsibleEIPTestPrefix: '{{ resource_prefix }}' - another_tag: 'another Value {{ resource_prefix }}' - register: tag_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - tag_eip is defined - - tag_eip is not changed - - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' - - '"another_tag" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix - - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix - - - name: Add another Tag - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - "third tag": 'Third tag - {{ resource_prefix }}' - purge_tags: False - register: tag_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - tag_eip is defined - - tag_eip is changed - - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' - 
- '"another_tag" in eip_info.addresses[0].tags' - - '"third tag" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix - - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix - - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix - - - name: Add another Tag - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - "third tag": 'Third tag - {{ resource_prefix }}' - purge_tags: False - register: tag_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - tag_eip is defined - - tag_eip is not changed - - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' - - '"another_tag" in eip_info.addresses[0].tags' - - '"third tag" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix - - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix - - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix - - - name: Purge most tags - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - "third tag": 'Third tag - {{ resource_prefix }}' - purge_tags: True - register: tag_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - tag_eip is defined - - tag_eip is changed - - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags' - - '"another_tag" not in eip_info.addresses[0].tags' - - '"third tag" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix - - - name: Purge most tags - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - "third tag": 'Third tag - {{ resource_prefix }}' - purge_tags: True - register: tag_eip - - ec2_eip_info: null - register: eip_info - - assert: - that: - - tag_eip is defined - - tag_eip is not changed - - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags' - - '"another_tag" not in eip_info.addresses[0].tags' - - '"third tag" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix - - ############################################################################################# - - - name: Release eip - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip_release is defined - - eip_release is changed - - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) - - name: Rerelease eip (no change) - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - - ec2_eip_info: null - register: eip_info - - assert: - that: - - eip_release is defined - - eip_release is not changed - - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) - - name: Cleanup VPC - ec2_vpc_net: - state: absent - name: '{{ resource_prefix }}-vpc' - cidr_block: '{{ vpc_cidr }}' - - - name: Create an EIP outside a VPC - ec2_eip: - state: present - in_vpc: '{{ omit }}' - register: unbound_eip - - assert: - that: - - unbound_eip is successful - - unbound_eip is changed - - name: Release EIP - ec2_eip: - state: absent - public_ip: '{{ unbound_eip.public_ip }}' - register: release_unbound_eip - - assert: - that: - - release_unbound_eip is successful - - release_unbound_eip is changed - # ===================================================== always: - - name: Cleanup instance (by id) - ec2_instance: - instance_ids: '{{ create_ec2_instance_result.instance_ids 
}}' - state: absent - wait: true - ignore_errors: true - - name: Cleanup instance (by name) - ec2_instance: - name: '{{ resource_prefix }}-instance' - state: absent - wait: true - ignore_errors: true - - name: Cleanup ENI A - ec2_eni: - state: absent - eni_id: '{{ eni_create_a.interface.id }}' - ignore_errors: true - - name: Cleanup ENI B - ec2_eni: - state: absent - eni_id: '{{ eni_create_b.interface.id }}' - ignore_errors: true - - name: Cleanup instance eip - ec2_eip: - state: absent - public_ip: '{{ instance_eip.public_ip }}' - retries: 5 - delay: 5 - until: eip_cleanup is successful - ignore_errors: true - - name: Cleanup IGW - ec2_vpc_igw: - state: absent - vpc_id: '{{ vpc_result.vpc.id }}' - register: vpc_igw - ignore_errors: true - - name: Cleanup security group - ec2_group: - state: absent - name: '{{ resource_prefix }}-sg' - ignore_errors: true - - name: Cleanup Subnet - ec2_vpc_subnet: - state: absent - cidr: '{{ subnet_cidr }}' - vpc_id: '{{ vpc_result.vpc.id }}' - ignore_errors: true - - name: Cleanup eip - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - when: eip is changed - ignore_errors: true - - name: Cleanup reallocate_eip - ec2_eip: - state: absent - public_ip: '{{ reallocate_eip.public_ip }}' - when: reallocate_eip is changed - ignore_errors: true - - name: Cleanup backend_eip - ec2_eip: - state: absent - public_ip: '{{ backend_eip.public_ip }}' - when: backend_eip is changed - ignore_errors: true - - name: Cleanup no_tagged_eip - ec2_eip: - state: absent - public_ip: '{{ no_tagged_eip.public_ip }}' - when: no_tagged_eip is changed - ignore_errors: true - - name: Cleanup unbound_eip - ec2_eip: - state: absent - public_ip: '{{ unbound_eip.public_ip }}' - when: unbound_eip is changed - ignore_errors: true - - name: Cleanup VPC - ec2_vpc_net: - state: absent - name: '{{ resource_prefix }}-vpc' - cidr_block: '{{ vpc_cidr }}' - ignore_errors: true + + - name: Cleanup instance (by id) + ec2_instance: + instance_ids: '{{ create_ec2_instance_result.instance_ids }}' + state: absent + wait: true + ignore_errors: true + + - name: Cleanup instance (by name) + ec2_instance: + name: '{{ resource_prefix }}-instance' + state: absent + wait: true + ignore_errors: true + + - name: Cleanup ENI A + ec2_eni: + state: absent + eni_id: '{{ eni_create_a.interface.id }}' + ignore_errors: true + + - name: Cleanup ENI B + ec2_eni: + state: absent + eni_id: '{{ eni_create_b.interface.id }}' + ignore_errors: true + + - name: Cleanup instance eip + ec2_eip: + state: absent + public_ip: '{{ instance_eip.public_ip }}' + retries: 5 + delay: 5 + until: eip_cleanup is successful + ignore_errors: true + + - name: Cleanup IGW + ec2_vpc_igw: + state: absent + vpc_id: '{{ vpc_result.vpc.id }}' + register: vpc_igw + ignore_errors: true + + - name: Cleanup security group + ec2_group: + state: absent + name: '{{ resource_prefix }}-sg' + ignore_errors: true + + - name: Cleanup Subnet + ec2_vpc_subnet: + state: absent + cidr: '{{ subnet_cidr }}' + vpc_id: '{{ vpc_result.vpc.id }}' + ignore_errors: true + + - name: Cleanup eip + ec2_eip: + state: absent + public_ip: '{{ eip.public_ip }}' + ignore_errors: true + + - name: Cleanup reallocate_eip + ec2_eip: + state: absent + public_ip: '{{ reallocate_eip.public_ip }}' + ignore_errors: true + + - name: Cleanup backend_eip + ec2_eip: + state: absent + public_ip: '{{ backend_eip.public_ip }}' + ignore_errors: true + + - name: Cleanup no_tagged_eip + ec2_eip: + state: absent + public_ip: '{{ no_tagged_eip.public_ip }}' + ignore_errors: true + + - name: 
Cleanup VPC + ec2_vpc_net: + state: absent + name: '{{ resource_prefix }}-vpc' + cidr_block: '{{ vpc_cidr }}' + ignore_errors: true From 612c8d9df453cad6e44bcbed744e55a9a57f25c2 Mon Sep 17 00:00:00 2001 From: tjarra Date: Thu, 17 Feb 2022 15:37:56 -0300 Subject: [PATCH 27/31] fixed method validate_tags and adjusted the documentation --- plugins/modules/aws_eks_fargate_profile.py | 30 +++++++++++++++++----- 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/plugins/modules/aws_eks_fargate_profile.py b/plugins/modules/aws_eks_fargate_profile.py index 2fba3561c1f..053d9588314 100644 --- a/plugins/modules/aws_eks_fargate_profile.py +++ b/plugins/modules/aws_eks_fargate_profile.py @@ -32,11 +32,18 @@ required: True type: list elements: str - namespace: - description: Name of Namespace + selectors: + description: A list of selectors to use in fargate profile required: True type: list - elements: str + suboptions: + namespace: + description: A namespace used in fargate profile + type: str + labels: + description: A dictionary of labels used in fargate profile + type: dict + elements: str state: description: Create or delete the Fargate Profile choices: @@ -44,6 +51,10 @@ - present default: present type: str + tags: + description: A dictionary of resource tags + type: dict + elements: str wait: description: >- Specifies whether the module waits until the profile is created or deleted before moving on. @@ -136,6 +147,13 @@ sample: - label1: test1 - label2: test2 +tags: + description: A dictionary of resource tags + returned: when state is present + type: dict + sample: + foo: bar + env: test status: description: status of the EKS Fargate Profile returned: when state is present @@ -158,7 +176,7 @@ def validate_tags(client, module, fargate_profile): changed = False - existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn']) + existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile['fargateProfileArn'])['tags'] tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, module.params.get('tags'), module.params.get('purge_tags')) @@ -166,7 +184,7 @@ def validate_tags(client, module, fargate_profile): if not module.check_mode: changed = True try: - client.untag_resource(aws_retry=True, ResourceArn=fargate_profile['fargateProfileArn'], tagKeys=tags_to_remove) + client.untag_resource(resourceArn=fargate_profile['fargateProfileArn'], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) @@ -174,7 +192,7 @@ def validate_tags(client, module, fargate_profile): if not module.check_mode: changed = True try: - client.tag_resource(aws_retry=True, ResourceArn=fargate_profile['fargateProfileArn'], tags=tags_to_add) + client.tag_resource(resourceArn=fargate_profile['fargateProfileArn'], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg='Unable to set tags for Fargate Profile %s' % module.params.get('name')) From bb1e4dffe5dfedd5df400d8260db3d7450f753e1 Mon Sep 17 00:00:00 2001 From: tjarra Date: Thu, 17 Feb 2022 16:39:03 -0300 Subject: [PATCH 28/31] added role creation as a dependency --- .../tasks/full_test.yml | 96 ++++++++++--------- 1 file changed, 49 insertions(+), 47 deletions(-) diff --git a/tests/integration/targets/aws_eks_fargate_profile/tasks/full_test.yml 
b/tests/integration/targets/aws_eks_fargate_profile/tasks/full_test.yml index 8f1e67c39d7..6ee4ed30d2a 100644 --- a/tests/integration/targets/aws_eks_fargate_profile/tasks/full_test.yml +++ b/tests/integration/targets/aws_eks_fargate_profile/tasks/full_test.yml @@ -1,10 +1,22 @@ +# Creating dependencies +- name: create IAM instance role + iam_role: + name: 'eksctl-{{ eks_cluster_name }}-cluster-FargatePodExecutionRole' + assume_role_policy_document: '{{ lookup(''file'',''eks-fargate-profile-trust-policy.json'') }}' + state: present + create_instance_profile: 'no' + wait: true + managed_policies: + - AmazonEKSFargatePodExecutionRolePolicy + register: iam_role_fargate + # Test - Try Create Fargate profile in non existent EKS - name: attempt to create fargate profile in non existent eks aws_eks_fargate_profile: name: '{{ eks_fargate_profile_name_a }}' state: present cluster_name: fake_cluster - role_arn: '{{ role }}' + role_arn: '{{ iam_role_fargate.arn }}' subnets: >- {{setup_subnets.results|selectattr('subnet.tags.Name', 'contains', 'private') | map(attribute='subnet.id') }} @@ -16,7 +28,6 @@ assert: that: - aws_eks_non_existent_eks is failed - - '"msg" in aws_eks_non_existent_eks' # Test - Try deleting a non-existent fargate profile - name: delete an as yet non-existent fargate profile @@ -24,7 +35,7 @@ name: fake_profile cluster_name: '{{ eks_cluster_name }}' state: absent - role_arn: '{{ role }}' + role_arn: '{{ iam_role_fargate.arn }}' subnets: >- {{setup_subnets.results|selectattr('subnet.tags.Name', 'contains', 'private') | map(attribute='subnet.id') }} @@ -43,7 +54,7 @@ name: '{{ eks_fargate_profile_name_a }}' state: present cluster_name: '{{ eks_cluster_name }}' - role_arn: '{{ role }}' + role_arn: '{{ iam_role_fargate.arn }}' subnets: >- {{setup_subnets.results|selectattr('subnet.tags.Name', 'contains', 'private') | map(attribute='subnet.id') }} @@ -60,19 +71,44 @@ - aws_eks_fargate_profile_create.status == "ACTIVE" # Try create same Fargate_profile -- name: create Fargate Profile again with same name +- name: Try create same Fargate Profile with wait aws_eks_fargate_profile: name: '{{ eks_fargate_profile_name_a }}' state: present cluster_name: '{{ eks_cluster_name }}' - role_arn: '{{ role }}' + role_arn: '{{ iam_role_fargate.arn }}' subnets: >- {{setup_subnets.results|selectattr('subnet.tags.Name', 'contains', 'private') | map(attribute='subnet.id') }} selectors: '{{ selectors }}' + wait: true + tags: '{{ tags }}' register: aws_eks_fargate_profile_create_again ignore_errors: 'yes' +- name: check that aws_eks_fargate_profile_create_again is not changed + assert: + that: + - not aws_eks_fargate_profile_create_again.changed + +# Update tags Fargate_profile +- name: update tags in Fargate Profile a with wait + aws_eks_fargate_profile: + name: '{{ eks_fargate_profile_name_a }}' + state: present + cluster_name: '{{ eks_cluster_name }}' + role_arn: '{{ iam_role_fargate.arn }}' + subnets: >- + {{setup_subnets.results|selectattr('subnet.tags.Name', 'contains', + 'private') | map(attribute='subnet.id') }} + selectors: '{{ selectors }}' + wait: true + tags: + env: test + test: foo + register: aws_eks_fargate_profile_create + ignore_errors: 'yes' + - name: check that aws_eks_fargate_profile_again is not changed assert: that: @@ -84,7 +120,7 @@ name: '{{ eks_fargate_profile_name_b }}' state: present cluster_name: '{{ eks_cluster_name }}' - role_arn: '{{ role }}' + role_arn: '{{ iam_role_fargate.arn }}' subnets: >- {{setup_subnets.results|selectattr('subnet.tags.Name', 'contains', 'private') | 
map(attribute='subnet.id') }} @@ -96,8 +132,9 @@ assert: that: - aws_eks_fargate_profile_create_b.changed + - aws_eks_fargate_profile_create_b.fargate_profile.status == "CREATING" -# Delete Fargate Profile A with wait +# Delete Fargate Profile A with wait (test check_profiles_status function) - name: delete a fargate profile aws_eks_fargate_profile: name: '{{ eks_fargate_profile_name_a }}' @@ -117,7 +154,7 @@ name: '{{ eks_fargate_profile_name_a }}' state: present cluster_name: '{{ eks_cluster_name }}' - role_arn: '{{ role }}' + role_arn: '{{ iam_role_fargate.arn }}' subnets: >- {{setup_subnets.results|selectattr('subnet.tags.Name', 'contains', 'public') | map(attribute='subnet.id') }} @@ -130,52 +167,17 @@ assert: that: - not aws_eks_fargate_profile_create.changed + - aws_eks_fargate_profile_create.msg.endswith("provided in Fargate Profile is not a private subnet") -# Try Remove a EKS Cluster with a fargate profile active -- name: remove EKS cluster - aws_eks_cluster: - name: '{{ eks_cluster_name }}' - state: absent - wait: 'yes' - register: eks_delete - ignore_errors: 'yes' - -- name: check that EKS cluster was not removed - assert: - that: - - not eks_delete.changed -- name: delete a fargate profile - aws_eks_fargate_profile: - name: '{{ eks_fargate_profile_name_b }}' - cluster_name: '{{ eks_cluster_name }}' - state: absent - register: aws_eks_fargate_profile_b_delete - -- name: check that aws_eks_fargate_profile is deleted - assert: - that: - - aws_eks_fargate_profile_b_delete.changed - -# Delete Fargate Profile B - name: delete a fargate profile aws_eks_fargate_profile: name: '{{ eks_fargate_profile_name_b }}' cluster_name: '{{ eks_cluster_name }}' state: absent + wait: true register: aws_eks_fargate_profile_b_delete - name: check that aws_eks_fargate_profile is deleted assert: that: - - aws_eks_fargate_profile_b_delete.changed - - - - - - - - - - - + - aws_eks_fargate_profile_b_delete.changed \ No newline at end of file From f86c20af0b092fbdff3c3c7248e9f5988ae98722 Mon Sep 17 00:00:00 2001 From: tjarra Date: Thu, 17 Feb 2022 16:39:40 -0300 Subject: [PATCH 29/31] added role creation as a dependency --- .../files/eks-fargate-profile-trust-policy.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 tests/integration/targets/aws_eks_fargate_profile/files/eks-fargate-profile-trust-policy.json diff --git a/tests/integration/targets/aws_eks_fargate_profile/files/eks-fargate-profile-trust-policy.json b/tests/integration/targets/aws_eks_fargate_profile/files/eks-fargate-profile-trust-policy.json new file mode 100644 index 00000000000..eec12ce49d7 --- /dev/null +++ b/tests/integration/targets/aws_eks_fargate_profile/files/eks-fargate-profile-trust-policy.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "eks-fargate-pods.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file From 9fdb5b23dd764e7266e8d5febc3126a727aeb1cd Mon Sep 17 00:00:00 2001 From: tjarra Date: Thu, 17 Feb 2022 16:40:00 -0300 Subject: [PATCH 30/31] fix some variables --- .../targets/aws_eks_fargate_profile/defaults/main.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/integration/targets/aws_eks_fargate_profile/defaults/main.yaml b/tests/integration/targets/aws_eks_fargate_profile/defaults/main.yaml index 54e8f424b35..005db76ae04 100644 --- a/tests/integration/targets/aws_eks_fargate_profile/defaults/main.yaml +++ 
b/tests/integration/targets/aws_eks_fargate_profile/defaults/main.yaml @@ -2,6 +2,13 @@ eks_cluster_name: "{{ resource_prefix }}" eks_fargate_profile_name_a: fp-template-a eks_fargate_profile_name_b: fp-template-b +selectors: + - namespace: "fp-default" + +tags: + foo: bar + env: test + eks_subnets: - zone: a cidr: 10.0.1.0/24 @@ -10,7 +17,7 @@ eks_subnets: - zone: b cidr: 10.0.2.0/24 type: public - tag: elbs + tag: elb eks_security_groups: - name: "{{ eks_cluster_name }}-control-plane-sg" From aede6f6467b9924caa6ce6acefd95b0643756f46 Mon Sep 17 00:00:00 2001 From: tjarra Date: Thu, 17 Feb 2022 17:04:50 -0300 Subject: [PATCH 31/31] add aliases in integration test --- tests/integration/targets/aws_eks_fargate_profile/aliases | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 tests/integration/targets/aws_eks_fargate_profile/aliases diff --git a/tests/integration/targets/aws_eks_fargate_profile/aliases b/tests/integration/targets/aws_eks_fargate_profile/aliases new file mode 100644 index 00000000000..a9f9d76a2c6 --- /dev/null +++ b/tests/integration/targets/aws_eks_fargate_profile/aliases @@ -0,0 +1,5 @@ +# reason: slow +# Tests take around 25 minutes to run +unsupported + +cloud/aws \ No newline at end of file