diff --git a/changelogs/fragments/773-elb_instance-boto3.yml b/changelogs/fragments/773-elb_instance-boto3.yml new file mode 100644 index 00000000000..09c231f27cf --- /dev/null +++ b/changelogs/fragments/773-elb_instance-boto3.yml @@ -0,0 +1,7 @@ +minor_changes: +- elb_instance - the module has been migrated to the boto3 AWS SDK (https://github.com/ansible-collections/community.aws/pull/773). +- elb_instance - added new ``updated_elbs`` return value (https://github.com/ansible-collections/community.aws/pull/773). +deprecated_features: +- elb_instance - setting of the ``ec2_elb`` fact has been deprecated and will be removed in release 4.0.0 of the collection. + See the module documentation for an alternative example using the register keyword + (https://github.com/ansible-collections/community.aws/pull/773). diff --git a/plugins/modules/elb_instance.py b/plugins/modules/elb_instance.py index b234031ee24..6116207866b 100644 --- a/plugins/modules/elb_instance.py +++ b/plugins/modules/elb_instance.py @@ -14,8 +14,6 @@ description: - This module de-registers or registers an AWS EC2 instance from the ELBs that it belongs to. - - Returns fact "ec2_elbs" which is a list of elbs attached to the instance - if state=absent is passed as an argument. - Will be marked changed when called only if there are ELBs found to operate on. author: "John Jarvis (@jarv)" options: @@ -27,13 +25,13 @@ type: str instance_id: description: - - EC2 Instance ID + - EC2 Instance ID. required: true type: str ec2_elbs: description: - - List of ELB names, required for registration. - - The ec2_elbs fact should be used if there was a previous de-register. + - List of ELB names + - Required when I(state=present). type: list elements: str enable_availability_zone: @@ -56,11 +54,12 @@ - Ignored when I(wait=no). default: 0 type: int +notes: +- The ec2_elb fact currently set by this module has been deprecated and will no + longer be set after release 4.0.0 of the collection. extends_documentation_fragment: - amazon.aws.aws - amazon.aws.ec2 -requirements: -- boto >= 2.49.0 ''' EXAMPLES = r""" @@ -70,6 +69,7 @@ community.aws.elb_instance: instance_id: "{{ ansible_ec2_instance_id }}" state: absent + register: deregister_instances delegate_to: localhost roles: - myrole @@ -77,90 +77,120 @@ - name: Instance Register community.aws.elb_instance: instance_id: "{{ ansible_ec2_instance_id }}" - ec2_elbs: "{{ item }}" + ec2_elbs: "{{ deregister_instances.updated_elbs }}" state: present delegate_to: localhost - loop: "{{ ec2_elbs }}" """ -import time +RETURN = ''' +updated_elbs: + description: A list of ELB names that the instance has been added to or removed from. 
+ returned: always + type: list + elements: str +''' try: - import boto - import boto.ec2 - import boto.ec2.autoscale - import boto.ec2.elb + import botocore except ImportError: - pass # Handled by HAS_BOTO + pass # Handled by AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AnsibleAWSError -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import connect_to_aws -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry class ElbManager: """Handles EC2 instance ELB registration and de-registration""" - def __init__(self, module, instance_id=None, ec2_elbs=None, - region=None, **aws_connect_params): + def __init__(self, module, instance_id=None, ec2_elbs=None): + retry_decorator = AWSRetry.jittered_backoff() self.module = module + self.client_asg = module.client('autoscaling', retry_decorator=retry_decorator) + self.client_ec2 = module.client('ec2', retry_decorator=retry_decorator) + self.client_elb = module.client('elb', retry_decorator=retry_decorator) self.instance_id = instance_id - self.region = region - self.aws_connect_params = aws_connect_params self.lbs = self._get_instance_lbs(ec2_elbs) self.changed = False + self.updated_elbs = set() def deregister(self, wait, timeout): """De-register the instance from all ELBs and wait for the ELB to report it out-of-service""" for lb in self.lbs: - initial_state = self._get_instance_health(lb) - if initial_state is None: - # Instance isn't registered with this load - # balancer. Ignore it and try the next one. + instance_ids = [i['InstanceId'] for i in lb['Instances']] + if self.instance_id not in instance_ids: continue - # The instance is not associated with any load balancer so nothing to do - if not self._get_instance_lbs(): - return + self.updated_elbs.add(lb['LoadBalancerName']) + + if self.module.check_mode: + self.changed = True + continue - lb.deregister_instances([self.instance_id]) + try: + self.client_elb.deregister_instances_from_load_balancer( + aws_retry=True, + LoadBalancerName=lb['LoadBalancerName'], + Instances=[{"InstanceId": self.instance_id}], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, 'Failed to deregister instance from load balancer', + load_balancer=lb, instance=self.instance_id) # The ELB is changing state in some way. Either an instance that's # InService is moving to OutOfService, or an instance that's # already OutOfService is being deregistered. 
self.changed = True - if wait: - self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout) + for lb in self.lbs: + self._await_elb_instance_state(lb, 'Deregistered', timeout) def register(self, wait, enable_availability_zone, timeout): """Register the instance for all ELBs and wait for the ELB to report the instance in-service""" for lb in self.lbs: - initial_state = self._get_instance_health(lb) + instance_ids = [i['InstanceId'] for i in lb['Instances']] + if self.instance_id in instance_ids: + continue - if enable_availability_zone: - self._enable_availailability_zone(lb) + self.updated_elbs.add(lb['LoadBalancerName']) - lb.register_instances([self.instance_id]) + if enable_availability_zone: + self.changed |= self._enable_availailability_zone(lb) - if wait: - self._await_elb_instance_state(lb, 'InService', initial_state, timeout) - else: - # We cannot assume no change was made if we don't wait - # to find out + if self.module.check_mode: self.changed = True + continue + + try: + self.client_elb.register_instances_with_load_balancer( + aws_retry=True, + LoadBalancerName=lb['LoadBalancerName'], + Instances=[{"InstanceId": self.instance_id}], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, 'Failed to register instance with load balancer', + load_balancer=lb, instance=self.instance_id) + + self.changed = True + + for lb in self.lbs: + self._await_elb_instance_state(lb, 'InService', timeout) + + @AWSRetry.jittered_backoff() + def _describe_elbs(self, **params): + paginator = self.client_elb.get_paginator('describe_load_balancers') + results = paginator.paginate(**params).build_full_result() + return results['LoadBalancerDescriptions'] def exists(self, lbtest): """ Verify that the named ELB actually exists """ found = False for lb in self.lbs: - if lb.name == lbtest: + if lb['LoadBalancerName'] == lbtest: found = True break return found @@ -170,63 +200,59 @@ def _enable_availailability_zone(self, lb): Returns True if the zone was enabled or False if no change was made. lb: load balancer""" instance = self._get_instance() - if instance.placement in lb.availability_zones: - return False + desired_zone = instance['Placement']['AvailabilityZone'] - lb.enable_zones(zones=instance.placement) - - # If successful, the new zone will have been added to - # lb.availability_zones - return instance.placement in lb.availability_zones - - def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout): - """Wait for an ELB to change state - lb: load balancer - awaited_state : state to poll for (string)""" + if desired_zone in lb['AvailabilityZones']: + return False - wait_timeout = time.time() + timeout - while True: - instance_state = self._get_instance_health(lb) + if self.module.check_mode: + return True - if not instance_state: - msg = ("The instance %s could not be put in service on %s." 
- " Reason: Invalid Instance") - self.module.fail_json(msg=msg % (self.instance_id, lb)) + try: + self.client_elb.enable_availability_zones_for_load_balancer( + aws_retry=True, + LoadBalancerName=lb['LoadBalancerName'], + AvailabilityZones=[desired_zone], + ) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, 'Failed to enable AZ on load balancers', + load_balancer=lb, zone=desired_zone) + + return True + + def _await_elb_instance_state(self, lb, awaited_state, timeout): + """Wait for an ELB to change state""" + if self.module.check_mode: + return + + initial_state = self._get_instance_health(lb) + + if awaited_state == initial_state: + return + + if awaited_state == 'InService': + waiter = self.client_elb.get_waiter('instance_in_service') + elif awaited_state == 'Deregistered': + waiter = self.client_elb.get_waiter('instance_deregistered') + elif awaited_state == 'OutOfService': + waiter = self.client_elb.get_waiter('instance_deregistered') + else: + self.module.fail_json(msg='Could not wait for unknown state', awaited_state=awaited_state) - if instance_state.state == awaited_state: - # Check the current state against the initial state, and only set - # changed if they are different. - if (initial_state is None) or (instance_state.state != initial_state.state): - self.changed = True - break - elif self._is_instance_state_pending(instance_state): - # If it's pending, we'll skip further checks and continue waiting - pass - elif (awaited_state == 'InService' - and instance_state.reason_code == "Instance" - and time.time() >= wait_timeout): - # If the reason_code for the instance being out of service is - # "Instance" this indicates a failure state, e.g. the instance - # has failed a health check or the ELB does not have the - # instance's availability zone enabled. The exact reason why is - # described in InstantState.description. - msg = ("The instance %s could not be put in service on %s." - " Reason: %s") - self.module.fail_json(msg=msg % (self.instance_id, - lb, - instance_state.description)) - time.sleep(1) - - def _is_instance_state_pending(self, instance_state): - """ - Determines whether the instance_state is "pending", meaning there is - an operation under way to bring it in service. - """ - # This is messy, because AWS provides no way to distinguish between - # an instance that is is OutOfService because it's pending vs. OutOfService - # because it's failing health checks. So we're forced to analyze the - # description, which is likely to be brittle. - return (instance_state and 'pending' in instance_state.description) + try: + waiter.wait( + LoadBalancerName=lb['LoadBalancerName'], + Instances=[{"InstanceId": self.instance_id}], + WaiterConfig={'Delay': 1, 'MaxAttempts': timeout}, + ) + except botocore.exceptions.WaiterError as e: + self.module.fail_json_aws(e, msg='Timeout waiting for instance to reach desired state', + awaited_state=awaited_state) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Error while waiting for instance to reach desired state', + awaited_state=awaited_state) + + return def _get_instance_health(self, lb): """ @@ -234,13 +260,20 @@ def _get_instance_health(self, lb): certain error conditions. 
""" try: - status = lb.get_instance_health([self.instance_id])[0] - except boto.exception.BotoServerError as e: - if e.error_code == 'InvalidInstance': - return None - else: - raise - return status + status = self.client_elb.describe_instance_health( + aws_retry=True, + LoadBalancerName=lb['LoadBalancerName'], + Instances=[{'InstanceId': self.instance_id}], + )['InstanceStates'] + except is_boto3_error_code('InvalidInstance'): + return None + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg='Failed to get instance health') + + if not status: + return None + + return status[0]['State'] def _get_instance_lbs(self, ec2_elbs=None): """Returns a list of ELBs attached to self.instance_id @@ -248,36 +281,29 @@ def _get_instance_lbs(self, ec2_elbs=None): for elb lookup instead of returning what elbs are attached to self.instance_id""" + list_params = dict() if not ec2_elbs: ec2_elbs = self._get_auto_scaling_group_lbs() - try: - elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) + if ec2_elbs: + list_params['LoadBalancerNames'] = ec2_elbs - elbs = [] - marker = None - while True: - try: - newelbs = elb.get_all_load_balancers(marker=marker) - marker = newelbs.next_marker - elbs.extend(newelbs) - if not marker: - break - except TypeError: - # Older version of boto do not allow for params - elbs = elb.get_all_load_balancers() - break + try: + elbs = self._describe_elbs(**list_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, 'Failed to describe load balancers') if ec2_elbs: - lbs = sorted([lb for lb in elbs if lb.name in ec2_elbs], key=lambda lb: lb.__repr__()) - else: - lbs = [] - for lb in elbs: - for info in lb.instances: - if self.instance_id == info.id: - lbs.append(lb) + return elbs + + # If ec2_elbs wasn't specified, then filter out LBs we're not a member + # of. 
+ lbs = [] + for lb in elbs: + instance_ids = [i['InstanceId'] for i in lb['Instances']] + if self.instance_id in instance_ids: + lbs.append(lb) + return lbs def _get_auto_scaling_group_lbs(self): @@ -285,34 +311,42 @@ def _get_auto_scaling_group_lbs(self): indirectly through its auto scaling group membership""" try: - asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) + asg_instances = self.client_asg.describe_auto_scaling_instances( + aws_retry=True, + InstanceIds=[self.instance_id])['AutoScalingInstances'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') - asg_instances = asg.get_all_autoscaling_instances([self.instance_id]) if len(asg_instances) > 1: self.module.fail_json(msg="Illegal state, expected one auto scaling group instance.") if not asg_instances: - asg_elbs = [] - else: - asg_name = asg_instances[0].group_name + # Instance isn't a member of an ASG + return [] + + asg_name = asg_instances[0]['AutoScalingGroupName'] - asgs = asg.get_all_groups([asg_name]) - if len(asg_instances) != 1: - self.module.fail_json(msg="Illegal state, expected one auto scaling group.") + try: + asg_instances = self.client_asg.describe_auto_scaling_groups( + aws_retry=True, + AutoScalingGroupNames=[asg_name])['AutoScalingGroups'] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') - asg_elbs = asgs[0].load_balancers + if len(asg_instances) != 1: + self.module.fail_json(msg="Illegal state, expected one auto scaling group.") - return asg_elbs + return asg_instances[0]['LoadBalancerNames'] def _get_instance(self): - """Returns a boto.ec2.InstanceObject for self.instance_id""" + """Returns the description of an instance""" try: - ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json(msg=str(e)) - return ec2.get_only_instances(instance_ids=[self.instance_id])[0] + result = self.client_ec2.describe_instances( + aws_retry=True, + InstanceIds=[self.instance_id]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg='Failed to describe ASG Instance') + return result['Reservations'][0]['Instances'][0] def main(): @@ -324,48 +358,43 @@ def main(): wait={'required': False, 'default': True, 'type': 'bool'}, wait_timeout={'required': False, 'default': 0, 'type': 'int'}, ) + required_if = [ + ('state', 'present', ['ec2_elbs']), + ] module = AnsibleAWSModule( argument_spec=argument_spec, + required_if=required_if, supports_check_mode=True, - check_boto3=False, ) - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - ec2_elbs = module.params['ec2_elbs'] wait = module.params['wait'] enable_availability_zone = module.params['enable_availability_zone'] timeout = module.params['wait_timeout'] - - if module.params['state'] == 'present' and 'ec2_elbs' not in module.params: - module.fail_json(msg="ELBs are required for registration") - instance_id = 
module.params['instance_id'] - elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params) + + elb_man = ElbManager(module, instance_id, ec2_elbs) if ec2_elbs is not None: for elb in ec2_elbs: if not elb_man.exists(elb): - msg = "ELB %s does not exist" % elb - module.fail_json(msg=msg) + module.fail_json(msg="ELB {0} does not exist".format(elb)) - if not module.check_mode: - if module.params['state'] == 'present': - elb_man.register(wait, enable_availability_zone, timeout) - elif module.params['state'] == 'absent': - elb_man.deregister(wait, timeout) + if module.params['state'] == 'present': + elb_man.register(wait, enable_availability_zone, timeout) + elif module.params['state'] == 'absent': + elb_man.deregister(wait, timeout) - ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]} - ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts) + # XXX We're not an _fact module we shouldn't be returning a fact and poluting + # the namespace + ansible_facts = {'ec2_elbs': [lb['LoadBalancerName'] for lb in elb_man.lbs]} - module.exit_json(**ec2_facts_result) + module.exit_json( + changed=elb_man.changed, + ansible_facts=ansible_facts, + updated_elbs=list(elb_man.updated_elbs), + ) if __name__ == '__main__': diff --git a/tests/integration/targets/elb_instance/defaults/main.yml b/tests/integration/targets/elb_instance/defaults/main.yml index 65b75091b6b..25817c110f3 100644 --- a/tests/integration/targets/elb_instance/defaults/main.yml +++ b/tests/integration/targets/elb_instance/defaults/main.yml @@ -1,5 +1,6 @@ --- -# defaults file for ec2_elb_lb +# defaults file for elb_instance + elb_name_1: 'ansible-test-{{ tiny_prefix }}-1' elb_name_2: 'ansible-test-{{ tiny_prefix }}-2' @@ -7,6 +8,16 @@ vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' subnet_cidr_1: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' subnet_cidr_2: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24' +instance_name_1: 'ansible-test-{{ tiny_prefix }}-elb-instance-1' +instance_name_2: 'ansible-test-{{ tiny_prefix }}-elb-instance-2' +lc_name: 'ansible-test-{{ tiny_prefix }}-elb-instance' +asg_name: 'ansible-test-{{ tiny_prefix }}-elb-instance' +vpc_name: 'ansible-test-{{ tiny_prefix }}-elb-instance' +subnet_name_1: 'ansible-test-{{ tiny_prefix }}-elb-instance-1' +subnet_name_2: 'ansible-test-{{ tiny_prefix }}-elb-instance-2' +sg_name_1: 'ansible-test-{{ tiny_prefix }}-elb-instance-1' +sg_name_2: 'ansible-test-{{ tiny_prefix }}-elb-instance-2' + availability_zone_a: '{{ ec2_availability_zone_names[0] }}' availability_zone_b: '{{ ec2_availability_zone_names[1] }}' diff --git a/tests/integration/targets/elb_instance/tasks/cleanup_instances.yml b/tests/integration/targets/elb_instance/tasks/cleanup_instances.yml index 5ad09127084..7ae91ac00b7 100644 --- a/tests/integration/targets/elb_instance/tasks/cleanup_instances.yml +++ b/tests/integration/targets/elb_instance/tasks/cleanup_instances.yml @@ -10,14 +10,14 @@ - name: Delete ASG ec2_asg: - name: "ansible-test-{{ tiny_prefix }}-elb" + name: '{{ asg_name }}' state: absent ignore_errors: true register: ec2_asg_a - name: Delete Launch Template ec2_lc: - name: "ansible-test-{{ tiny_prefix }}-elb" + name: '{{ lc_name }}' state: absent ignore_errors: true register: ec2_lc_a diff --git a/tests/integration/targets/elb_instance/tasks/cleanup_vpc.yml b/tests/integration/targets/elb_instance/tasks/cleanup_vpc.yml index 5c79f7adfbe..9abeb74a279 100644 --- a/tests/integration/targets/elb_instance/tasks/cleanup_vpc.yml +++ 
b/tests/integration/targets/elb_instance/tasks/cleanup_vpc.yml @@ -5,8 +5,8 @@ state: absent ignore_errors: true loop: - - '{{ resource_prefix }}-a' - - '{{ resource_prefix }}-b' + - '{{ sg_name_1 }}' + - '{{ sg_name_2 }}' - name: delete subnets ec2_vpc_subnet: @@ -22,5 +22,5 @@ ec2_vpc_net: cidr_block: '{{ vpc_cidr }}' state: absent - name: '{{ resource_prefix }}' + name: '{{ vpc_name }}' ignore_errors: true diff --git a/tests/integration/targets/elb_instance/tasks/manage_asgs.yml b/tests/integration/targets/elb_instance/tasks/manage_asgs.yml index b4e246ef08c..776fdbd51da 100644 --- a/tests/integration/targets/elb_instance/tasks/manage_asgs.yml +++ b/tests/integration/targets/elb_instance/tasks/manage_asgs.yml @@ -1,7 +1,7 @@ --- - name: Get ASG info ec2_asg_info: - name: "ansible-test-{{ tiny_prefix }}-elb$" + name: "{{ asg_name }}$" register: asg_info - name: Store Instance ID from ASG @@ -26,7 +26,10 @@ - assert: that: - remove_instance is successful - # - remove_instance is changed + - remove_instance is changed + - '"updated_elbs" in remove_instance' + - elb_name_1 in remove_instance.updated_elbs + - elb_name_2 in remove_instance.updated_elbs # It really shouldn't be returning a fact here - '"ansible_facts" in remove_instance' - '"ec2_elbs" in remove_instance.ansible_facts' @@ -54,6 +57,9 @@ that: - remove_instance is successful - remove_instance is changed + - '"updated_elbs" in remove_instance' + - elb_name_1 in remove_instance.updated_elbs + - elb_name_2 in remove_instance.updated_elbs # It really shouldn't be returning a fact here - '"ansible_facts" in remove_instance' - '"ec2_elbs" in remove_instance.ansible_facts' @@ -82,6 +88,9 @@ that: - remove_instance is successful - remove_instance is not changed + - '"updated_elbs" in remove_instance' + - elb_name_1 not in remove_instance.updated_elbs + - elb_name_2 not in remove_instance.updated_elbs # Check the real state - instance_asg not in elb_info_1.elbs[0].instances_inservice - instance_asg not in elb_info_2.elbs[0].instances_inservice @@ -103,8 +112,10 @@ - assert: that: - remove_instance is successful - # XXX always returns the ELBs the ASG belongs to - # - remove_instance is not changed + - remove_instance is not changed + - '"updated_elbs" in remove_instance' + - elb_name_1 not in remove_instance.updated_elbs + - elb_name_2 not in remove_instance.updated_elbs # Check the real state - instance_asg not in elb_info_1.elbs[0].instances_inservice - instance_asg not in elb_info_2.elbs[0].instances_inservice diff --git a/tests/integration/targets/elb_instance/tasks/manage_instances.yml b/tests/integration/targets/elb_instance/tasks/manage_instances.yml index 4530b371ede..67a1ea1b02f 100644 --- a/tests/integration/targets/elb_instance/tasks/manage_instances.yml +++ b/tests/integration/targets/elb_instance/tasks/manage_instances.yml @@ -27,7 +27,10 @@ - assert: that: - add_instance is successful - # - add_instance is changed + - add_instance is changed + - '"updated_elbs" in add_instance' + - elb_name_1 in add_instance.updated_elbs + - elb_name_2 in add_instance.updated_elbs # It really shouldn't be returning a fact here - '"ansible_facts" in add_instance' - '"ec2_elbs" in add_instance.ansible_facts' @@ -58,6 +61,9 @@ that: - add_instance is successful - add_instance is changed + - '"updated_elbs" in add_instance' + - elb_name_1 in add_instance.updated_elbs + - elb_name_2 in add_instance.updated_elbs # It really shouldn't be returning a fact here - '"ansible_facts" in add_instance' - '"ec2_elbs" in add_instance.ansible_facts' @@ -89,6 
+95,9 @@ that: - add_instance is successful - add_instance is not changed + - '"updated_elbs" in add_instance' + - elb_name_1 not in add_instance.updated_elbs + - elb_name_2 not in add_instance.updated_elbs # Check the real state didn't change - instance_a in elb_info_1.elbs[0].instances_inservice - instance_a in elb_info_2.elbs[0].instances_inservice @@ -114,6 +123,9 @@ that: - add_instance is successful - add_instance is not changed + - '"updated_elbs" in add_instance' + - elb_name_1 not in add_instance.updated_elbs + - elb_name_2 not in add_instance.updated_elbs # Check the real state didn't change - instance_a in elb_info_1.elbs[0].instances_inservice - instance_a in elb_info_2.elbs[0].instances_inservice @@ -137,7 +149,7 @@ - assert: that: - # - add_instance is failed + # - add_instance is failed # Check the real state didn't change - instance_b not in elb_info_1.elbs[0].instances_inservice @@ -243,7 +255,6 @@ # ################################################################################ - - name: 'Remove an instance from two ELBs (check_mode)' elb_instance: instance_id: '{{ instance_a }}' @@ -265,7 +276,10 @@ - assert: that: - remove_instance is successful - # - remove_instance is changed + - remove_instance is changed + - '"updated_elbs" in remove_instance' + - elb_name_1 in remove_instance.updated_elbs + - elb_name_2 in remove_instance.updated_elbs # It really shouldn't be returning a fact here - '"ansible_facts" in remove_instance' - '"ec2_elbs" in remove_instance.ansible_facts' @@ -296,6 +310,9 @@ that: - remove_instance is successful - remove_instance is changed + - '"updated_elbs" in remove_instance' + - elb_name_1 in remove_instance.updated_elbs + - elb_name_2 in remove_instance.updated_elbs # It really shouldn't be returning a fact here - '"ansible_facts" in remove_instance' - '"ec2_elbs" in remove_instance.ansible_facts' @@ -327,6 +344,9 @@ that: - remove_instance is successful - remove_instance is not changed + - '"updated_elbs" in remove_instance' + - elb_name_1 not in remove_instance.updated_elbs + - elb_name_2 not in remove_instance.updated_elbs # Check the real state didn't change - instance_a not in elb_info_1.elbs[0].instances_inservice - instance_a not in elb_info_2.elbs[0].instances_inservice @@ -352,6 +372,145 @@ that: - remove_instance is successful - remove_instance is not changed + - '"updated_elbs" in remove_instance' + - elb_name_1 not in remove_instance.updated_elbs + - elb_name_2 not in remove_instance.updated_elbs # Check the real state didn't change - instance_a not in elb_info_1.elbs[0].instances_inservice - instance_a not in elb_info_2.elbs[0].instances_inservice + +################################################################################ + +- name: 'Add second instance to one ELB' + elb_instance: + instance_id: '{{ instance_b }}' + state: 'present' + ec2_elbs: + - '{{ elb_name_2 }}' + wait_timeout: 60 + register: add_instance + +- assert: + that: + - add_instance is successful + - add_instance is changed + - '"updated_elbs" in add_instance' + - elb_name_1 not in add_instance.updated_elbs + - elb_name_2 in add_instance.updated_elbs + # It really shouldn't be returning a fact here + - '"ansible_facts" in add_instance' + - '"ec2_elbs" in add_instance.ansible_facts' + - elb_name_1 not in add_instance.ansible_facts.ec2_elbs + - elb_name_2 in add_instance.ansible_facts.ec2_elbs + +- name: 'Remove an instance without specifying ELBs (check_mode)' + elb_instance: + instance_id: '{{ instance_b }}' + state: 'absent' + wait_timeout: 60 + register: 
remove_instance + check_mode: true + +- elb_classic_lb_info: + names: '{{ elb_name_1 }}' + register: elb_info_1 +- elb_classic_lb_info: + names: '{{ elb_name_2 }}' + register: elb_info_2 + +- assert: + that: + - remove_instance is successful + - remove_instance is changed + - '"updated_elbs" in remove_instance' + - elb_name_1 not in remove_instance.updated_elbs + - elb_name_2 in remove_instance.updated_elbs + # It really shouldn't be returning a fact here + - '"ansible_facts" in remove_instance' + - '"ec2_elbs" in remove_instance.ansible_facts' + - elb_name_1 not in remove_instance.ansible_facts.ec2_elbs + - elb_name_2 in remove_instance.ansible_facts.ec2_elbs + # Check the real state didn't change + - instance_b not in elb_info_1.elbs[0].instances_inservice + - instance_b in elb_info_2.elbs[0].instances_inservice + +- name: 'Remove an instance without specifying ELBs' + elb_instance: + instance_id: '{{ instance_b }}' + state: 'absent' + wait_timeout: 60 + register: remove_instance + +- elb_classic_lb_info: + names: '{{ elb_name_1 }}' + register: elb_info_1 +- elb_classic_lb_info: + names: '{{ elb_name_2 }}' + register: elb_info_2 + +- assert: + that: + - remove_instance is successful + - remove_instance is changed + - '"updated_elbs" in remove_instance' + - elb_name_1 not in remove_instance.updated_elbs + - elb_name_2 in remove_instance.updated_elbs + # It really shouldn't be returning a fact here + - '"ansible_facts" in remove_instance' + - '"ec2_elbs" in remove_instance.ansible_facts' + - elb_name_1 not in remove_instance.ansible_facts.ec2_elbs + - elb_name_2 in remove_instance.ansible_facts.ec2_elbs + # Check the real state + - instance_b not in elb_info_1.elbs[0].instances_inservice + - instance_b not in elb_info_2.elbs[0].instances_inservice + +- name: 'Remove an instance without specifying ELBs - idempotency (check_mode)' + elb_instance: + instance_id: '{{ instance_b }}' + state: 'absent' + wait_timeout: 60 + register: remove_instance + check_mode: true + +- elb_classic_lb_info: + names: '{{ elb_name_1 }}' + register: elb_info_1 +- elb_classic_lb_info: + names: '{{ elb_name_2 }}' + register: elb_info_2 + +- assert: + that: + - remove_instance is successful + - remove_instance is not changed + - '"updated_elbs" in remove_instance' + - elb_name_1 not in remove_instance.updated_elbs + - elb_name_2 not in remove_instance.updated_elbs + # Check the real state didn't change + - instance_b not in elb_info_1.elbs[0].instances_inservice + - instance_b not in elb_info_2.elbs[0].instances_inservice + +- name: 'Remove an instance without specifying ELBs - idempotency' + elb_instance: + instance_id: '{{ instance_b }}' + state: 'absent' + wait_timeout: 60 + register: remove_instance + +- elb_classic_lb_info: + names: '{{ elb_name_1 }}' + register: elb_info_1 +- elb_classic_lb_info: + names: '{{ elb_name_2 }}' + register: elb_info_2 + +- assert: + that: + - remove_instance is successful + - remove_instance is not changed + - '"updated_elbs" in remove_instance' + - elb_name_1 not in remove_instance.updated_elbs + - elb_name_2 not in remove_instance.updated_elbs + # Check the real state didn't change + - instance_b not in elb_info_1.elbs[0].instances_inservice + - instance_b not in elb_info_2.elbs[0].instances_inservice diff --git a/tests/integration/targets/elb_instance/tasks/setup_elbs.yml b/tests/integration/targets/elb_instance/tasks/setup_elbs.yml index 835c7a03600..2e820f820c1 100644 --- a/tests/integration/targets/elb_instance/tasks/setup_elbs.yml +++ 
b/tests/integration/targets/elb_instance/tasks/setup_elbs.yml @@ -17,11 +17,6 @@ timeout: 2 register: result -- assert: - that: - - result is changed - - result.elb.status == "created" - - name: Create a private load balancer with 2 AZs enabled elb_classic_lb: name: "{{ elb_name_2 }}" @@ -38,8 +33,3 @@ interval: 5 timeout: 2 register: result - -- assert: - that: - - result is changed - - result.elb.status == "created" diff --git a/tests/integration/targets/elb_instance/tasks/setup_instances.yml b/tests/integration/targets/elb_instance/tasks/setup_instances.yml index 2abc348dfca..b89b38d20bd 100644 --- a/tests/integration/targets/elb_instance/tasks/setup_instances.yml +++ b/tests/integration/targets/elb_instance/tasks/setup_instances.yml @@ -1,7 +1,7 @@ --- - name: Create instance a ec2_instance: - name: "ansible-test-{{ tiny_prefix }}-elb-a" + name: "{{ instance_name_1 }}" image_id: "{{ ec2_ami_id }}" vpc_subnet_id: "{{ subnet_a }}" instance_type: t3.micro @@ -11,7 +11,7 @@ - name: Create instance b ec2_instance: - name: "ansible-test-{{ tiny_prefix }}-elb-b" + name: "{{ instance_name_2 }}" image_id: "{{ ec2_ami_id }}" vpc_subnet_id: "{{ subnet_b }}" instance_type: t3.micro @@ -26,7 +26,7 @@ - name: Create a Launch Template ec2_lc: - name: "ansible-test-{{ tiny_prefix }}-elb" + name: "{{ lc_name }}" image_id: "{{ ec2_ami_id }}" security_groups: "{{ sg_a }}" instance_type: t3.micro @@ -35,11 +35,11 @@ - name: Create an ASG ec2_asg: - name: "ansible-test-{{ tiny_prefix }}-elb" + name: "{{ asg_name }}" load_balancers: - "{{ elb_name_1 }}" - "{{ elb_name_2 }}" - launch_config_name: "ansible-test-{{ tiny_prefix }}-elb" + launch_config_name: "{{ lc_name }}" availability_zones: - "{{ availability_zone_a }}" min_size: 0 diff --git a/tests/integration/targets/elb_instance/tasks/setup_vpc.yml b/tests/integration/targets/elb_instance/tasks/setup_vpc.yml index 6edbe7e2c9b..26fafa41c94 100644 --- a/tests/integration/targets/elb_instance/tasks/setup_vpc.yml +++ b/tests/integration/targets/elb_instance/tasks/setup_vpc.yml @@ -4,9 +4,9 @@ ec2_vpc_net: cidr_block: '{{ vpc_cidr }}' state: present - name: '{{ resource_prefix }}' + name: '{{ vpc_name }}' resource_tags: - Name: '{{ resource_prefix }}' + Name: '{{ vpc_name }}' register: setup_vpc - name: create a subnet @@ -17,7 +17,7 @@ cidr: '{{ subnet_cidr_1 }}' state: present resource_tags: - Name: '{{ resource_prefix }}-a' + Name: '{{ subnet_name_1 }}' register: setup_subnet_1 - name: create a subnet @@ -28,12 +28,12 @@ cidr: '{{ subnet_cidr_2 }}' state: present resource_tags: - Name: '{{ resource_prefix }}-b' + Name: '{{ subnet_name_2 }}' register: setup_subnet_2 - name: create a security group ec2_group: - name: '{{ resource_prefix }}-a' + name: '{{ sg_name_1 }}' description: 'created by Ansible integration tests' state: present vpc_id: '{{ setup_vpc.vpc.id }}' @@ -46,7 +46,7 @@ - name: create a security group ec2_group: - name: '{{ resource_prefix }}-b' + name: '{{ sg_name_2 }}' description: 'created by Ansible integration tests' state: present vpc_id: '{{ setup_vpc.vpc.id }}'
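
Note on the deprecated ec2_elbs fact: the changelog fragment above directs users to the register keyword, and the module's EXAMPLES in this diff already pair `register: deregister_instances` with `ec2_elbs: "{{ deregister_instances.updated_elbs }}"`. The sketch below is a minimal illustration of that round-trip in a rolling-update style play; the play name, host group, `serial: 1`, task names, and the registered variable name are illustrative and not taken from the PR — only the module parameters and the `updated_elbs` return value come from this diff.

- hosts: webservers
  serial: 1
  tasks:
    - name: Remove the instance from every ELB it is currently registered with
      community.aws.elb_instance:
        instance_id: "{{ ansible_ec2_instance_id }}"
        state: absent
        wait_timeout: 60
      delegate_to: localhost
      register: deregistered          # captures updated_elbs instead of relying on the deprecated ec2_elbs fact

    # ... perform the update on the instance here ...

    - name: Re-register the instance with the ELBs it was removed from
      community.aws.elb_instance:
        instance_id: "{{ ansible_ec2_instance_id }}"
        ec2_elbs: "{{ deregistered.updated_elbs }}"   # list returned by the de-registration task
        state: present
        wait_timeout: 60
      delegate_to: localhost

Per the module code in this diff, `updated_elbs` is also populated in the check_mode branches of register() and deregister(), so the same pattern remains usable in --check runs.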