diff --git a/changelogs/fragments/377-ec2_elb_lb-boto3.yml b/changelogs/fragments/377-ec2_elb_lb-boto3.yml new file mode 100644 index 00000000000..e32a65a12ce --- /dev/null +++ b/changelogs/fragments/377-ec2_elb_lb-boto3.yml @@ -0,0 +1,8 @@ +minor_changes: +- ec2_elb_lb - module renamed to ``elb_classic_lb`` (https://github.com/ansible-collections/amazon.aws/pull/377). +- elb_classic_lb - migrated to boto3 SDK (https://github.com/ansible-collections/amazon.aws/pull/377). +- elb_classic_lb - added support for check_mode (https://github.com/ansible-collections/amazon.aws/pull/377). +- elb_classic_lb - added support for wait during creation (https://github.com/ansible-collections/amazon.aws/pull/377). +- elb_classic_lb - added support for wait during instance addition and removal (https://github.com/ansible-collections/amazon.aws/pull/377). +- elb_classic_lb - added retries on common AWS temporary API failures (https://github.com/ansible-collections/amazon.aws/pull/377). +- elb_classic_lb - various error messages changed due to refactor (https://github.com/ansible-collections/amazon.aws/pull/377). diff --git a/meta/runtime.yml b/meta/runtime.yml index 894f5ca7ecd..fdd0b5fac76 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -49,6 +49,7 @@ action_groups: - ec2_vpc_net_info - ec2_vpc_subnet - ec2_vpc_subnet_info + - elb_classic_lb - s3_bucket - ec2_vpc_endpoint_facts - ec2_vpc_endpoint @@ -98,6 +99,8 @@ plugin_routing: warning_text: >- ec2_ami_facts was renamed in Ansible 2.9 to ec2_ami_info. Please update your tasks. + ec2_elb_lb: + redirect: amazon.aws.elb_classic_lb ec2_eni_facts: deprecation: removal_date: 2021-12-01 diff --git a/plugins/module_utils/waiters.py b/plugins/module_utils/waiters.py index a234d6c09b4..403fdb3a023 100644 --- a/plugins/module_utils/waiters.py +++ b/plugins/module_utils/waiters.py @@ -90,6 +90,30 @@ }, ] }, + "NetworkInterfaceDeleted": { + "operation": "DescribeNetworkInterfaces", + "delay": 5, + "maxAttempts": 40, + "acceptors": [ + { + "matcher": "path", + "expected": True, + "argument": "length(NetworkInterfaces[]) > `0`", + "state": "retry" + }, + { + "matcher": "path", + "expected": True, + "argument": "length(NetworkInterfaces[]) == `0`", + "state": "success" + }, + { + "expected": "InvalidNetworkInterfaceID.NotFound", + "matcher": "error", + "state": "success" + }, + ] + }, "NetworkInterfaceDeleteOnTerminate": { "operation": "DescribeNetworkInterfaces", "delay": 5, @@ -462,6 +486,98 @@ } +elb_data = { + "version": 2, + "waiters": { + "AnyInstanceInService": { + "acceptors": [ + { + "argument": "InstanceStates[].State", + "expected": "InService", + "matcher": "pathAny", + "state": "success" + } + ], + "delay": 15, + "maxAttempts": 40, + "operation": "DescribeInstanceHealth" + }, + "InstanceDeregistered": { + "delay": 15, + "operation": "DescribeInstanceHealth", + "maxAttempts": 40, + "acceptors": [ + { + "expected": "OutOfService", + "matcher": "pathAll", + "state": "success", + "argument": "InstanceStates[].State" + }, + { + "matcher": "error", + "expected": "InvalidInstance", + "state": "success" + } + ] + }, + "InstanceInService": { + "acceptors": [ + { + "argument": "InstanceStates[].State", + "expected": "InService", + "matcher": "pathAll", + "state": "success" + }, + { + "matcher": "error", + "expected": "InvalidInstance", + "state": "retry" + } + ], + "delay": 15, + "maxAttempts": 40, + "operation": "DescribeInstanceHealth" + }, + "LoadBalancerCreated": { + "delay": 10, + "maxAttempts": 60, + "operation": "DescribeLoadBalancers", + "acceptors": 
[
+                {
+                    "matcher": "path",
+                    "expected": True,
+                    "argument": "length(LoadBalancerDescriptions[]) > `0`",
+                    "state": "success",
+                },
+                {
+                    "matcher": "error",
+                    "expected": "LoadBalancerNotFound",
+                    "state": "retry",
+                },
+            ],
+        },
+        "LoadBalancerDeleted": {
+            "delay": 10,
+            "maxAttempts": 60,
+            "operation": "DescribeLoadBalancers",
+            "acceptors": [
+                {
+                    "matcher": "path",
+                    "expected": True,
+                    "argument": "length(LoadBalancerDescriptions[]) > `0`",
+                    "state": "retry",
+                },
+                {
+                    "matcher": "error",
+                    "expected": "LoadBalancerNotFound",
+                    "state": "success",
+                },
+            ],
+        },
+    }
+}
+
+
 rds_data = {
     "version": 2,
     "waiters": {
@@ -572,6 +688,11 @@ def eks_model(name):
     return eks_models.get_waiter(name)
 
 
+def elb_model(name):
+    elb_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(elb_data))
+    return elb_models.get_waiter(name)
+
+
 def rds_model(name):
     rds_models = core_waiter.WaiterModel(waiter_config=_inject_limit_retries(rds_data))
     return rds_models.get_waiter(name)
@@ -601,6 +722,12 @@ def route53_model(name):
         core_waiter.NormalizedOperationMethod(
             ec2.describe_network_interfaces
         )),
+    ('EC2', 'network_interface_deleted'): lambda ec2: core_waiter.Waiter(
+        'network_interface_deleted',
+        ec2_model('NetworkInterfaceDeleted'),
+        core_waiter.NormalizedOperationMethod(
+            ec2.describe_network_interfaces
+        )),
     ('EC2', 'network_interface_available'): lambda ec2: core_waiter.Waiter(
         'network_interface_available',
         ec2_model('NetworkInterfaceAvailable'),
@@ -745,6 +872,36 @@ def route53_model(name):
         core_waiter.NormalizedOperationMethod(
             eks.describe_cluster
         )),
+    ('ElasticLoadBalancing', 'any_instance_in_service'): lambda elb: core_waiter.Waiter(
+        'any_instance_in_service',
+        elb_model('AnyInstanceInService'),
+        core_waiter.NormalizedOperationMethod(
+            elb.describe_instance_health
+        )),
+    ('ElasticLoadBalancing', 'instance_deregistered'): lambda elb: core_waiter.Waiter(
+        'instance_deregistered',
+        elb_model('InstanceDeregistered'),
+        core_waiter.NormalizedOperationMethod(
+            elb.describe_instance_health
+        )),
+    ('ElasticLoadBalancing', 'instance_in_service'): lambda elb: core_waiter.Waiter(
+        'instance_in_service',
+        elb_model('InstanceInService'),
+        core_waiter.NormalizedOperationMethod(
+            elb.describe_instance_health
+        )),
+    ('ElasticLoadBalancing', 'load_balancer_created'): lambda elb: core_waiter.Waiter(
+        'load_balancer_created',
+        elb_model('LoadBalancerCreated'),
+        core_waiter.NormalizedOperationMethod(
+            elb.describe_load_balancers
+        )),
+    ('ElasticLoadBalancing', 'load_balancer_deleted'): lambda elb: core_waiter.Waiter(
+        'load_balancer_deleted',
+        elb_model('LoadBalancerDeleted'),
+        core_waiter.NormalizedOperationMethod(
+            elb.describe_load_balancers
+        )),
     ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter(
         'db_instance_stopped',
         rds_model('DBInstanceStopped'),
diff --git a/plugins/modules/ec2_elb_lb.py b/plugins/modules/ec2_elb_lb.py
deleted file mode 100644
index 1c3c8d34fbb..00000000000
--- a/plugins/modules/ec2_elb_lb.py
+++ /dev/null
@@ -1,1337 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
----
-module: ec2_elb_lb
-version_added: 1.0.0
-description:
-  - Returns information about the load balancer.
-  - Will be marked changed when called only if state is changed.
-short_description: Creates, updates or destroys an Amazon ELB.
-author: - - "Jim Dalton (@jsdalton)" -options: - state: - description: - - Create or destroy the ELB. - type: str - choices: [ absent, present ] - required: true - name: - description: - - The name of the ELB. - type: str - required: true - listeners: - description: - - List of ports/protocols for this ELB to listen on (see examples). - type: list - elements: dict - purge_listeners: - description: - - Purge existing listeners on ELB that are not found in listeners. - type: bool - default: yes - instance_ids: - description: - - List of instance ids to attach to this ELB. - type: list - elements: str - purge_instance_ids: - description: - - Purge existing instance ids on ELB that are not found in instance_ids. - type: bool - default: no - zones: - description: - - List of availability zones to enable on this ELB. - type: list - elements: str - purge_zones: - description: - - Purge existing availability zones on ELB that are not found in zones. - type: bool - default: no - security_group_ids: - description: - - A list of security groups to apply to the ELB. - type: list - elements: str - security_group_names: - description: - - A list of security group names to apply to the ELB. - type: list - elements: str - health_check: - description: - - An associative array of health check configuration settings (see examples). - type: dict - access_logs: - description: - - An associative array of access logs configuration settings (see examples). - type: dict - subnets: - description: - - A list of VPC subnets to use when creating ELB. Zones should be empty if using this. - type: list - elements: str - purge_subnets: - description: - - Purge existing subnet on ELB that are not found in subnets. - type: bool - default: no - scheme: - description: - - The scheme to use when creating the ELB. For a private VPC-visible ELB use C(internal). - - If you choose to update your scheme with a different value the ELB will be destroyed and - recreated. To update scheme you must use the option I(wait). - type: str - choices: ["internal", "internet-facing"] - default: 'internet-facing' - connection_draining_timeout: - description: - - Wait a specified timeout allowing connections to drain before terminating an instance. - type: int - idle_timeout: - description: - - ELB connections from clients and to servers are timed out after this amount of time. - type: int - cross_az_load_balancing: - description: - - Distribute load across all configured Availability Zones. - - Defaults to C(false). - type: bool - stickiness: - description: - - An associative array of stickiness policy settings. Policy will be applied to all listeners (see examples). - type: dict - wait: - description: - - When specified, Ansible will check the status of the load balancer to ensure it has been successfully - removed from AWS. - type: bool - default: no - wait_timeout: - description: - - Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated. - - A maximum of 600 seconds (10 minutes) is allowed. - type: int - default: 60 - tags: - description: - - An associative array of tags. To delete all tags, supply an empty dict (C({})). - type: dict - -extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 - -requirements: -- python >= 2.6 -- boto - -''' - -EXAMPLES = """ -# Note: None of these examples set aws_access_key, aws_secret_key, or region. -# It is assumed that their matching environment variables are set. 
- -# Basic provisioning example (non-VPC) - -- amazon.aws.ec2_elb_lb: - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http # options are http, https, ssl, tcp - load_balancer_port: 80 - instance_port: 80 - proxy_protocol: True - - protocol: https - load_balancer_port: 443 - instance_protocol: http # optional, defaults to value of protocol setting - instance_port: 80 - # ssl certificate required for https or ssl - ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert" - -# Internal ELB example - -- amazon.aws.ec2_elb_lb: - name: "test-vpc" - scheme: internal - state: present - instance_ids: - - i-abcd1234 - purge_instance_ids: true - subnets: - - subnet-abcd1234 - - subnet-1a2b3c4d - listeners: - - protocol: http # options are http, https, ssl, tcp - load_balancer_port: 80 - instance_port: 80 - -# Configure a health check and the access logs -- amazon.aws.ec2_elb_lb: - name: "test-please-delete" - state: present - zones: - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - health_check: - ping_protocol: http # options are http, https, ssl, tcp - ping_port: 80 - ping_path: "/index.html" # not required for tcp or ssl - response_timeout: 5 # seconds - interval: 30 # seconds - unhealthy_threshold: 2 - healthy_threshold: 10 - access_logs: - interval: 5 # minutes (defaults to 60) - s3_location: "my-bucket" # This value is required if access_logs is set - s3_prefix: "logs" - -# Ensure ELB is gone -- amazon.aws.ec2_elb_lb: - name: "test-please-delete" - state: absent - -# Ensure ELB is gone and wait for check (for default timeout) -- amazon.aws.ec2_elb_lb: - name: "test-please-delete" - state: absent - wait: yes - -# Ensure ELB is gone and wait for check with timeout value -- amazon.aws.ec2_elb_lb: - name: "test-please-delete" - state: absent - wait: yes - wait_timeout: 600 - -# Normally, this module will purge any listeners that exist on the ELB -# but aren't specified in the listeners parameter. If purge_listeners is -# false it leaves them alone -- amazon.aws.ec2_elb_lb: - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_listeners: no - -# Normally, this module will leave availability zones that are enabled -# on the ELB alone. If purge_zones is true, then any extraneous zones -# will be removed -- amazon.aws.ec2_elb_lb: - name: "test-please-delete" - state: present - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_zones: yes - -# Creates a ELB and assigns a list of subnets to it. 
-- amazon.aws.ec2_elb_lb: - state: present - name: 'New ELB' - security_group_ids: 'sg-123456, sg-67890' - region: us-west-2 - subnets: 'subnet-123456,subnet-67890' - purge_subnets: yes - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - -# Create an ELB with connection draining, increased idle timeout and cross availability -# zone load balancing -- amazon.aws.ec2_elb_lb: - name: "New ELB" - state: present - connection_draining_timeout: 60 - idle_timeout: 300 - cross_az_load_balancing: "yes" - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - -# Create an ELB with load balancer stickiness enabled -- amazon.aws.ec2_elb_lb: - name: "New ELB" - state: present - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - stickiness: - type: loadbalancer - enabled: yes - expiration: 300 - -# Create an ELB with application stickiness enabled -- amazon.aws.ec2_elb_lb: - name: "New ELB" - state: present - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - stickiness: - type: application - enabled: yes - cookie: SESSIONID - -# Create an ELB and add tags -- amazon.aws.ec2_elb_lb: - name: "New ELB" - state: present - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - tags: - Name: "New ELB" - stack: "production" - client: "Bob" - -# Delete all tags from an ELB -- amazon.aws.ec2_elb_lb: - name: "New ELB" - state: present - region: us-east-1 - zones: - - us-east-1a - - us-east-1d - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - tags: {} -""" - -import random -import time - -try: - import boto - import boto.ec2.elb - import boto.ec2.elb.attributes - import boto.vpc - from boto.ec2.elb.healthcheck import HealthCheck - from boto.ec2.tag import Tag -except ImportError: - pass # Taken care of by ec2.HAS_BOTO - -from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_native - -from ..module_utils.core import AnsibleAWSModule -from ..module_utils.ec2 import AnsibleAWSError -from ..module_utils.ec2 import HAS_BOTO -from ..module_utils.ec2 import connect_to_aws -from ..module_utils.ec2 import get_aws_connection_info - - -def _throttleable_operation(max_retries): - def _operation_wrapper(op): - def _do_op(*args, **kwargs): - retry = 0 - while True: - try: - return op(*args, **kwargs) - except boto.exception.BotoServerError as e: - if retry < max_retries and e.code in \ - ("Throttling", "RequestLimitExceeded"): - retry = retry + 1 - time.sleep(min(random.random() * (2 ** retry), 300)) - continue - else: - raise - return _do_op - return _operation_wrapper - - -def _get_vpc_connection(module, region, aws_connect_params): - try: - return connect_to_aws(boto.vpc, region, **aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - module.fail_json_aws(e, 'Failed to connect to AWS') - - -_THROTTLING_RETRIES = 5 - - -class ElbManager(object): - """Handles ELB creation and destruction""" - - def __init__(self, module, name, listeners=None, purge_listeners=None, - zones=None, purge_zones=None, security_group_ids=None, - health_check=None, subnets=None, purge_subnets=None, - scheme="internet-facing", connection_draining_timeout=None, - idle_timeout=None, - 
cross_az_load_balancing=None, access_logs=None, - stickiness=None, wait=None, wait_timeout=None, tags=None, - region=None, - instance_ids=None, purge_instance_ids=None, **aws_connect_params): - - self.module = module - self.name = name - self.listeners = listeners - self.purge_listeners = purge_listeners - self.instance_ids = instance_ids - self.purge_instance_ids = purge_instance_ids - self.zones = zones - self.purge_zones = purge_zones - self.security_group_ids = security_group_ids - self.health_check = health_check - self.subnets = subnets - self.purge_subnets = purge_subnets - self.scheme = scheme - self.connection_draining_timeout = connection_draining_timeout - self.idle_timeout = idle_timeout - self.cross_az_load_balancing = cross_az_load_balancing - self.access_logs = access_logs - self.stickiness = stickiness - self.wait = wait - self.wait_timeout = wait_timeout - self.tags = tags - - self.aws_connect_params = aws_connect_params - self.region = region - - self.changed = False - self.status = 'gone' - self.elb_conn = self._get_elb_connection() - - try: - self.elb = self._get_elb() - except boto.exception.BotoServerError as e: - module.fail_json_aws(e, msg='Unable to get all load balancers') - - self.ec2_conn = self._get_ec2_connection() - - @_throttleable_operation(_THROTTLING_RETRIES) - def ensure_ok(self): - """Create the ELB""" - if not self.elb: - # Zones and listeners will be added at creation - self._create_elb() - else: - if self._get_scheme(): - # the only way to change the scheme is by recreating the resource - self.ensure_gone() - self._create_elb() - else: - self._set_zones() - self._set_security_groups() - self._set_elb_listeners() - self._set_subnets() - self._set_health_check() - # boto has introduced support for some ELB attributes in - # different versions, so we check first before trying to - # set them to avoid errors - if self._check_attribute_support('connection_draining'): - self._set_connection_draining_timeout() - if self._check_attribute_support('connecting_settings'): - self._set_idle_timeout() - if self._check_attribute_support('cross_zone_load_balancing'): - self._set_cross_az_load_balancing() - if self._check_attribute_support('access_log'): - self._set_access_log() - # add sticky options - self.select_stickiness_policy() - - # ensure backend server policies are correct - self._set_backend_policies() - # set/remove instance ids - self._set_instance_ids() - - self._set_tags() - - def ensure_gone(self): - """Destroy the ELB""" - if self.elb: - self._delete_elb() - if self.wait: - elb_removed = self._wait_for_elb_removed() - # Unfortunately even though the ELB itself is removed quickly - # the interfaces take longer so reliant security groups cannot - # be deleted until the interface has registered as removed. 
- elb_interface_removed = self._wait_for_elb_interface_removed() - if not (elb_removed and elb_interface_removed): - self.module.fail_json(msg='Timed out waiting for removal of load balancer.') - - def get_info(self): - try: - check_elb = self.elb_conn.get_all_load_balancers(self.name)[0] - except Exception: - check_elb = None - - if not check_elb: - info = { - 'name': self.name, - 'status': self.status, - 'region': self.region - } - else: - try: - lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name'] - except Exception: - lb_cookie_policy = None - try: - app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name'] - except Exception: - app_cookie_policy = None - - info = { - 'name': check_elb.name, - 'dns_name': check_elb.dns_name, - 'zones': check_elb.availability_zones, - 'security_group_ids': check_elb.security_groups, - 'status': self.status, - 'subnets': self.subnets, - 'scheme': check_elb.scheme, - 'hosted_zone_name': check_elb.canonical_hosted_zone_name, - 'hosted_zone_id': check_elb.canonical_hosted_zone_name_id, - 'lb_cookie_policy': lb_cookie_policy, - 'app_cookie_policy': app_cookie_policy, - 'proxy_policy': self._get_proxy_protocol_policy(), - 'backends': self._get_backend_policies(), - 'instances': [instance.id for instance in check_elb.instances], - 'out_of_service_count': 0, - 'in_service_count': 0, - 'unknown_instance_state_count': 0, - 'region': self.region - } - - # status of instances behind the ELB - if info['instances']: - info['instance_health'] = [dict( - instance_id=instance_state.instance_id, - reason_code=instance_state.reason_code, - state=instance_state.state - ) for instance_state in self.elb_conn.describe_instance_health(self.name)] - else: - info['instance_health'] = [] - - # instance state counts: InService or OutOfService - if info['instance_health']: - for instance_state in info['instance_health']: - if instance_state['state'] == "InService": - info['in_service_count'] += 1 - elif instance_state['state'] == "OutOfService": - info['out_of_service_count'] += 1 - else: - info['unknown_instance_state_count'] += 1 - - if check_elb.health_check: - info['health_check'] = { - 'target': check_elb.health_check.target, - 'interval': check_elb.health_check.interval, - 'timeout': check_elb.health_check.timeout, - 'healthy_threshold': check_elb.health_check.healthy_threshold, - 'unhealthy_threshold': check_elb.health_check.unhealthy_threshold, - } - - if check_elb.listeners: - info['listeners'] = [self._api_listener_as_tuple(l) - for l in check_elb.listeners] - elif self.status == 'created': - # When creating a new ELB, listeners don't show in the - # immediately returned result, so just include the - # ones that were added - info['listeners'] = [self._listener_as_tuple(l) - for l in self.listeners] - else: - info['listeners'] = [] - - if self._check_attribute_support('connection_draining'): - info['connection_draining_timeout'] = int(self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout) - - if self._check_attribute_support('connecting_settings'): - info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout - - if self._check_attribute_support('cross_zone_load_balancing'): - is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing') - if is_cross_az_lb_enabled: - info['cross_az_load_balancing'] = 'yes' - else: - info['cross_az_load_balancing'] = 'no' - - # return stickiness info? 
- - info['tags'] = self.tags - - return info - - @_throttleable_operation(_THROTTLING_RETRIES) - def _wait_for_elb_removed(self): - polling_increment_secs = 15 - max_retries = (self.wait_timeout // polling_increment_secs) - status_achieved = False - - for x in range(0, max_retries): - try: - self.elb_conn.get_all_lb_attributes(self.name) - except (boto.exception.BotoServerError, Exception) as e: - if "LoadBalancerNotFound" in e.code: - status_achieved = True - break - else: - time.sleep(polling_increment_secs) - - return status_achieved - - @_throttleable_operation(_THROTTLING_RETRIES) - def _wait_for_elb_interface_removed(self): - polling_increment_secs = 15 - max_retries = (self.wait_timeout // polling_increment_secs) - status_achieved = False - - elb_interfaces = self.ec2_conn.get_all_network_interfaces( - filters={'attachment.instance-owner-id': 'amazon-elb', - 'description': 'ELB {0}'.format(self.name)}) - - for x in range(0, max_retries): - for interface in elb_interfaces: - try: - result = self.ec2_conn.get_all_network_interfaces(interface.id) - if result == []: - status_achieved = True - break - else: - time.sleep(polling_increment_secs) - except (boto.exception.BotoServerError, Exception) as e: - if 'InvalidNetworkInterfaceID' in e.code: - status_achieved = True - break - else: - self.module.fail_json_aws(e, 'Failure while waiting for interface to be removed') - - return status_achieved - - @_throttleable_operation(_THROTTLING_RETRIES) - def _get_elb(self): - elbs = self.elb_conn.get_all_load_balancers() - for elb in elbs: - if self.name == elb.name: - self.status = 'ok' - return elb - - def _get_elb_connection(self): - try: - return connect_to_aws(boto.ec2.elb, self.region, - **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e: - self.module.fail_json_aws(e, 'Failure while connecting to AWS') - - def _get_ec2_connection(self): - try: - return connect_to_aws(boto.ec2, self.region, - **self.aws_connect_params) - except (boto.exception.NoAuthHandlerFound, Exception) as e: - self.module.fail_json_aws(e, 'Failure while connecting to AWS') - - @_throttleable_operation(_THROTTLING_RETRIES) - def _delete_elb(self): - # True if succeeds, exception raised if not - result = self.elb_conn.delete_load_balancer(name=self.name) - if result: - self.changed = True - self.status = 'deleted' - - def _create_elb(self): - listeners = [self._listener_as_tuple(l) for l in self.listeners] - self.elb = self.elb_conn.create_load_balancer(name=self.name, - zones=self.zones, - security_groups=self.security_group_ids, - complex_listeners=listeners, - subnets=self.subnets, - scheme=self.scheme) - if self.elb: - # HACK: Work around a boto bug in which the listeners attribute is - # always set to the listeners argument to create_load_balancer, and - # not the complex_listeners - # We're not doing a self.elb = self._get_elb here because there - # might be eventual consistency issues and it doesn't necessarily - # make sense to wait until the ELB gets returned from the EC2 API. 
- # This is necessary in the event we hit the throttling errors and - # need to retry ensure_ok - # See https://github.com/boto/boto/issues/3526 - self.elb.listeners = self.listeners - self.changed = True - self.status = 'created' - - def _create_elb_listeners(self, listeners): - """Takes a list of listener tuples and creates them""" - # True if succeeds, exception raised if not - self.changed = self.elb_conn.create_load_balancer_listeners(self.name, - complex_listeners=listeners) - - def _delete_elb_listeners(self, listeners): - """Takes a list of listener tuples and deletes them from the elb""" - ports = [l[0] for l in listeners] - - # True if succeeds, exception raised if not - self.changed = self.elb_conn.delete_load_balancer_listeners(self.name, - ports) - - def _set_elb_listeners(self): - """ - Creates listeners specified by self.listeners; overwrites existing - listeners on these ports; removes extraneous listeners - """ - listeners_to_add = [] - listeners_to_remove = [] - listeners_to_keep = [] - - # Check for any listeners we need to create or overwrite - for listener in self.listeners: - listener_as_tuple = self._listener_as_tuple(listener) - - # First we loop through existing listeners to see if one is - # already specified for this port - existing_listener_found = None - for existing_listener in self.elb.listeners: - # Since ELB allows only one listener on each incoming port, a - # single match on the incoming port is all we're looking for - if existing_listener[0] == int(listener['load_balancer_port']): - existing_listener_found = self._api_listener_as_tuple(existing_listener) - break - - if existing_listener_found: - # Does it match exactly? - if listener_as_tuple != existing_listener_found: - # The ports are the same but something else is different, - # so we'll remove the existing one and add the new one - listeners_to_remove.append(existing_listener_found) - listeners_to_add.append(listener_as_tuple) - else: - # We already have this listener, so we're going to keep it - listeners_to_keep.append(existing_listener_found) - else: - # We didn't find an existing listener, so just add the new one - listeners_to_add.append(listener_as_tuple) - - # Check for any extraneous listeners we need to remove, if desired - if self.purge_listeners: - for existing_listener in self.elb.listeners: - existing_listener_tuple = self._api_listener_as_tuple(existing_listener) - if existing_listener_tuple in listeners_to_remove: - # Already queued for removal - continue - if existing_listener_tuple in listeners_to_keep: - # Keep this one around - continue - # Since we're not already removing it and we don't need to keep - # it, let's get rid of it - listeners_to_remove.append(existing_listener_tuple) - - if listeners_to_remove: - self._delete_elb_listeners(listeners_to_remove) - - if listeners_to_add: - self._create_elb_listeners(listeners_to_add) - - def _api_listener_as_tuple(self, listener): - """Adds ssl_certificate_id to ELB API tuple if present""" - base_tuple = listener.get_complex_tuple() - if listener.ssl_certificate_id and len(base_tuple) < 5: - return base_tuple + (listener.ssl_certificate_id,) - return base_tuple - - def _listener_as_tuple(self, listener): - """Formats listener as a 4- or 5-tuples, in the order specified by the - ELB API""" - # N.B. 
string manipulations on protocols below (str(), upper()) is to - # ensure format matches output from ELB API - listener_list = [ - int(listener['load_balancer_port']), - int(listener['instance_port']), - str(listener['protocol'].upper()), - ] - - # Instance protocol is not required by ELB API; it defaults to match - # load balancer protocol. We'll mimic that behavior here - if 'instance_protocol' in listener: - listener_list.append(str(listener['instance_protocol'].upper())) - else: - listener_list.append(str(listener['protocol'].upper())) - - if 'ssl_certificate_id' in listener: - listener_list.append(str(listener['ssl_certificate_id'])) - - return tuple(listener_list) - - def _enable_zones(self, zones): - try: - self.elb.enable_zones(zones) - except boto.exception.BotoServerError as e: - self.module.fail_json_aws(e, msg='unable to enable zones') - - self.changed = True - - def _disable_zones(self, zones): - try: - self.elb.disable_zones(zones) - except boto.exception.BotoServerError as e: - self.module.fail_json_aws(e, msg='unable to disable zones') - self.changed = True - - def _attach_subnets(self, subnets): - self.elb_conn.attach_lb_to_subnets(self.name, subnets) - self.changed = True - - def _detach_subnets(self, subnets): - self.elb_conn.detach_lb_from_subnets(self.name, subnets) - self.changed = True - - def _set_subnets(self): - """Determine which subnets need to be attached or detached on the ELB""" - if self.subnets: - if self.purge_subnets: - subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets)) - subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) - else: - subnets_to_detach = None - subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets)) - - if subnets_to_attach: - self._attach_subnets(subnets_to_attach) - if subnets_to_detach: - self._detach_subnets(subnets_to_detach) - - def _get_scheme(self): - """Determine if the current scheme is different than the scheme of the ELB""" - if self.scheme: - if self.elb.scheme != self.scheme: - if not self.wait: - self.module.fail_json(msg="Unable to modify scheme without using the wait option") - return True - return False - - def _set_zones(self): - """Determine which zones need to be enabled or disabled on the ELB""" - if self.zones: - if self.purge_zones: - zones_to_disable = list(set(self.elb.availability_zones) - - set(self.zones)) - zones_to_enable = list(set(self.zones) - - set(self.elb.availability_zones)) - else: - zones_to_disable = None - zones_to_enable = list(set(self.zones) - - set(self.elb.availability_zones)) - if zones_to_enable: - self._enable_zones(zones_to_enable) - # N.B. This must come second, in case it would have removed all zones - if zones_to_disable: - self._disable_zones(zones_to_disable) - - def _set_security_groups(self): - if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids): - self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids) - self.changed = True - - def _set_health_check(self): - """Set health check values on ELB as needed""" - if self.health_check: - # This just makes it easier to compare each of the attributes - # and look for changes. 
Keys are attributes of the current - # health_check; values are desired values of new health_check - health_check_config = { - "target": self._get_health_check_target(), - "timeout": self.health_check['response_timeout'], - "interval": self.health_check['interval'], - "unhealthy_threshold": self.health_check['unhealthy_threshold'], - "healthy_threshold": self.health_check['healthy_threshold'], - } - - update_health_check = False - - # The health_check attribute is *not* set on newly created - # ELBs! So we have to create our own. - if not self.elb.health_check: - self.elb.health_check = HealthCheck() - - for attr, desired_value in health_check_config.items(): - if getattr(self.elb.health_check, attr) != desired_value: - setattr(self.elb.health_check, attr, desired_value) - update_health_check = True - - if update_health_check: - self.elb.configure_health_check(self.elb.health_check) - self.changed = True - - def _check_attribute_support(self, attr): - return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr) - - def _set_cross_az_load_balancing(self): - attributes = self.elb.get_attributes() - if self.cross_az_load_balancing: - if not attributes.cross_zone_load_balancing.enabled: - self.changed = True - attributes.cross_zone_load_balancing.enabled = True - else: - if attributes.cross_zone_load_balancing.enabled: - self.changed = True - attributes.cross_zone_load_balancing.enabled = False - self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing', - attributes.cross_zone_load_balancing.enabled) - - def _set_access_log(self): - attributes = self.elb.get_attributes() - if self.access_logs: - if 's3_location' not in self.access_logs: - self.module.fail_json(msg='s3_location information required') - - access_logs_config = { - "enabled": True, - "s3_bucket_name": self.access_logs['s3_location'], - "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''), - "emit_interval": self.access_logs.get('interval', 60), - } - - update_access_logs_config = False - for attr, desired_value in access_logs_config.items(): - if getattr(attributes.access_log, attr) != desired_value: - setattr(attributes.access_log, attr, desired_value) - update_access_logs_config = True - if update_access_logs_config: - self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) - self.changed = True - elif attributes.access_log.enabled: - attributes.access_log.enabled = False - self.changed = True - self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log) - - def _set_connection_draining_timeout(self): - attributes = self.elb.get_attributes() - if self.connection_draining_timeout is not None: - if not attributes.connection_draining.enabled or \ - attributes.connection_draining.timeout != self.connection_draining_timeout: - self.changed = True - attributes.connection_draining.enabled = True - attributes.connection_draining.timeout = self.connection_draining_timeout - self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) - else: - if attributes.connection_draining.enabled: - self.changed = True - attributes.connection_draining.enabled = False - self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining) - - def _set_idle_timeout(self): - attributes = self.elb.get_attributes() - if self.idle_timeout is not None: - if attributes.connecting_settings.idle_timeout != self.idle_timeout: - self.changed = True - attributes.connecting_settings.idle_timeout = self.idle_timeout - 
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings) - - def _policy_name(self, policy_type): - return 'ec2-elb-lb-{0}'.format(to_native(policy_type, errors='surrogate_or_strict')) - - def _create_policy(self, policy_param, policy_meth, policy): - getattr(self.elb_conn, policy_meth)(policy_param, self.elb.name, policy) - - def _delete_policy(self, elb_name, policy): - self.elb_conn.delete_lb_policy(elb_name, policy) - - def _update_policy(self, policy_param, policy_meth, policy_attr, policy): - self._delete_policy(self.elb.name, policy) - self._create_policy(policy_param, policy_meth, policy) - - def _set_listener_policy(self, listeners_dict, policy=None): - policy = [] if policy is None else policy - - for listener_port in listeners_dict: - if listeners_dict[listener_port].startswith('HTTP'): - self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy) - - def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs): - for p in getattr(elb_info.policies, policy_attrs['attr']): - if str(p.__dict__['policy_name']) == str(policy[0]): - if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0): - self._set_listener_policy(listeners_dict) - self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0]) - self.changed = True - break - else: - self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0]) - self.changed = True - - self._set_listener_policy(listeners_dict, policy) - - def select_stickiness_policy(self): - if self.stickiness: - - if 'cookie' in self.stickiness and 'expiration' in self.stickiness: - self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time') - - elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0] - d = {} - for listener in elb_info.listeners: - d[listener[0]] = listener[2] - listeners_dict = d - - if self.stickiness['type'] == 'loadbalancer': - policy = [] - policy_type = 'LBCookieStickinessPolicyType' - - if self.module.boolean(self.stickiness['enabled']): - - if 'expiration' not in self.stickiness: - self.module.fail_json(msg='expiration must be set when type is loadbalancer') - - try: - expiration = self.stickiness['expiration'] if int(self.stickiness['expiration']) else None - except ValueError: - self.module.fail_json(msg='expiration must be set to an integer') - - policy_attrs = { - 'type': policy_type, - 'attr': 'lb_cookie_stickiness_policies', - 'method': 'create_lb_cookie_stickiness_policy', - 'dict_key': 'cookie_expiration_period', - 'param_value': expiration - } - policy.append(self._policy_name(policy_attrs['type'])) - - self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) - elif not self.module.boolean(self.stickiness['enabled']): - if len(elb_info.policies.lb_cookie_stickiness_policies): - if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): - self.changed = True - else: - self.changed = False - self._set_listener_policy(listeners_dict) - self._delete_policy(self.elb.name, self._policy_name(policy_type)) - - elif self.stickiness['type'] == 'application': - policy = [] - policy_type = 'AppCookieStickinessPolicyType' - if self.module.boolean(self.stickiness['enabled']): - - if 'cookie' not in self.stickiness: - self.module.fail_json(msg='cookie must be set when type is application') - - policy_attrs = { - 'type': policy_type, - 'attr': 
'app_cookie_stickiness_policies', - 'method': 'create_app_cookie_stickiness_policy', - 'dict_key': 'cookie_name', - 'param_value': self.stickiness['cookie'] - } - policy.append(self._policy_name(policy_attrs['type'])) - self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs) - elif not self.module.boolean(self.stickiness['enabled']): - if len(elb_info.policies.app_cookie_stickiness_policies): - if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type): - self.changed = True - self._set_listener_policy(listeners_dict) - self._delete_policy(self.elb.name, self._policy_name(policy_type)) - - else: - self._set_listener_policy(listeners_dict) - - def _get_backend_policies(self): - """Get a list of backend policies""" - policies = [] - if self.elb.backends is not None: - for backend in self.elb.backends: - if backend.policies is not None: - for policy in backend.policies: - policies.append(str(backend.instance_port) + ':' + policy.policy_name) - - return policies - - def _set_backend_policies(self): - """Sets policies for all backends""" - ensure_proxy_protocol = False - replace = [] - backend_policies = self._get_backend_policies() - - # Find out what needs to be changed - for listener in self.listeners: - want = False - - if 'proxy_protocol' in listener and listener['proxy_protocol']: - ensure_proxy_protocol = True - want = True - - if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies: - if not want: - replace.append({'port': listener['instance_port'], 'policies': []}) - elif want: - replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']}) - - # enable or disable proxy protocol - if ensure_proxy_protocol: - self._set_proxy_protocol_policy() - - # Make the backend policies so - for item in replace: - self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies']) - self.changed = True - - def _get_proxy_protocol_policy(self): - """Find out if the elb has a proxy protocol enabled""" - if self.elb.policies is not None and self.elb.policies.other_policies is not None: - for policy in self.elb.policies.other_policies: - if policy.policy_name == 'ProxyProtocol-policy': - return policy.policy_name - - return None - - def _set_proxy_protocol_policy(self): - """Install a proxy protocol policy if needed""" - proxy_policy = self._get_proxy_protocol_policy() - - if proxy_policy is None: - self.elb_conn.create_lb_policy( - self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True} - ) - self.changed = True - - # TODO: remove proxy protocol policy if not needed anymore? 
There is no side effect to leaving it there - - def _diff_list(self, a, b): - """Find the entries in list a that are not in list b""" - b = set(b) - return [aa for aa in a if aa not in b] - - def _get_instance_ids(self): - """Get the current list of instance ids installed in the elb""" - instances = [] - if self.elb.instances is not None: - for instance in self.elb.instances: - instances.append(instance.id) - - return instances - - def _set_instance_ids(self): - """Register or deregister instances from an lb instance""" - assert_instances = self.instance_ids or [] - - has_instances = self._get_instance_ids() - - add_instances = self._diff_list(assert_instances, has_instances) - if add_instances: - self.elb_conn.register_instances(self.elb.name, add_instances) - self.changed = True - - if self.purge_instance_ids: - remove_instances = self._diff_list(has_instances, assert_instances) - if remove_instances: - self.elb_conn.deregister_instances(self.elb.name, remove_instances) - self.changed = True - - def _set_tags(self): - """Add/Delete tags""" - if self.tags is None: - return - - params = {'LoadBalancerNames.member.1': self.name} - - tagdict = dict() - - # get the current list of tags from the ELB, if ELB exists - if self.elb: - current_tags = self.elb_conn.get_list('DescribeTags', params, - [('member', Tag)]) - tagdict = dict((tag.Key, tag.Value) for tag in current_tags - if hasattr(tag, 'Key')) - - # Add missing tags - dictact = dict(set(self.tags.items()) - set(tagdict.items())) - if dictact: - for i, key in enumerate(dictact): - params['Tags.member.%d.Key' % (i + 1)] = key - params['Tags.member.%d.Value' % (i + 1)] = dictact[key] - - self.elb_conn.make_request('AddTags', params) - self.changed = True - - # Remove extra tags - dictact = dict(set(tagdict.items()) - set(self.tags.items())) - if dictact: - for i, key in enumerate(dictact): - params['Tags.member.%d.Key' % (i + 1)] = key - - self.elb_conn.make_request('RemoveTags', params) - self.changed = True - - def _get_health_check_target(self): - """Compose target string from healthcheck parameters""" - protocol = self.health_check['ping_protocol'].upper() - path = "" - - if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check: - path = self.health_check['ping_path'] - - return "%s:%s%s" % (protocol, self.health_check['ping_port'], path) - - -def main(): - argument_spec = dict( - state={'required': True, 'choices': ['present', 'absent']}, - name={'required': True}, - listeners={'default': None, 'required': False, 'type': 'list', 'elements': 'dict'}, - purge_listeners={'default': True, 'required': False, 'type': 'bool'}, - instance_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - purge_instance_ids={'default': False, 'required': False, 'type': 'bool'}, - zones={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - purge_zones={'default': False, 'required': False, 'type': 'bool'}, - security_group_ids={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - security_group_names={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - health_check={'default': None, 'required': False, 'type': 'dict'}, - subnets={'default': None, 'required': False, 'type': 'list', 'elements': 'str'}, - purge_subnets={'default': False, 'required': False, 'type': 'bool'}, - scheme={'default': 'internet-facing', 'required': False, 'choices': ['internal', 'internet-facing']}, - connection_draining_timeout={'default': None, 'required': False, 'type': 'int'}, - 
idle_timeout={'default': None, 'type': 'int', 'required': False}, - cross_az_load_balancing={'default': None, 'type': 'bool', 'required': False}, - stickiness={'default': None, 'required': False, 'type': 'dict'}, - access_logs={'default': None, 'required': False, 'type': 'dict'}, - wait={'default': False, 'type': 'bool', 'required': False}, - wait_timeout={'default': 60, 'type': 'int', 'required': False}, - tags={'default': None, 'required': False, 'type': 'dict'} - ) - - module = AnsibleAWSModule( - argument_spec=argument_spec, - check_boto3=False, - mutually_exclusive=[['security_group_ids', 'security_group_names']] - ) - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module') - - region, ec2_url, aws_connect_params = get_aws_connection_info(module) - if not region: - module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file") - - name = module.params['name'] - state = module.params['state'] - listeners = module.params['listeners'] - purge_listeners = module.params['purge_listeners'] - instance_ids = module.params['instance_ids'] - purge_instance_ids = module.params['purge_instance_ids'] - zones = module.params['zones'] - purge_zones = module.params['purge_zones'] - security_group_ids = module.params['security_group_ids'] - security_group_names = module.params['security_group_names'] - health_check = module.params['health_check'] - access_logs = module.params['access_logs'] - subnets = module.params['subnets'] - purge_subnets = module.params['purge_subnets'] - scheme = module.params['scheme'] - connection_draining_timeout = module.params['connection_draining_timeout'] - idle_timeout = module.params['idle_timeout'] - cross_az_load_balancing = module.params['cross_az_load_balancing'] - stickiness = module.params['stickiness'] - wait = module.params['wait'] - wait_timeout = module.params['wait_timeout'] - tags = module.params['tags'] - - if state == 'present' and not listeners: - module.fail_json(msg="At least one listener is required for ELB creation") - - if state == 'present' and not (zones or subnets): - module.fail_json(msg="At least one availability zone or subnet is required for ELB creation") - - if wait_timeout > 600: - module.fail_json(msg='wait_timeout maximum is 600 seconds') - - if security_group_names: - security_group_ids = [] - try: - ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params) - if subnets: # We have at least one subnet, ergo this is a VPC - vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params) - vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id - filters = {'vpc_id': vpc_id} - else: - filters = None - grp_details = ec2.get_all_security_groups(filters=filters) - - for group_name in security_group_names: - if isinstance(group_name, string_types): - group_name = [group_name] - - group_id = [str(grp.id) for grp in grp_details if str(grp.name) in group_name] - security_group_ids.extend(group_id) - except boto.exception.NoAuthHandlerFound as e: - module.fail_json_aws(e) - - elb_man = ElbManager(module, name, listeners, purge_listeners, zones, - purge_zones, security_group_ids, health_check, - subnets, purge_subnets, scheme, - connection_draining_timeout, idle_timeout, - cross_az_load_balancing, - access_logs, stickiness, wait, wait_timeout, tags, - region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids, - **aws_connect_params) - - # check for unsupported attributes for this version 
of boto
-    if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
-        module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
-
-    if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
-        module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
-
-    if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
-        module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
-
-    if state == 'present':
-        elb_man.ensure_ok()
-    elif state == 'absent':
-        elb_man.ensure_gone()
-
-    ansible_facts = {'ec2_elb': 'info'}
-    ec2_facts_result = dict(changed=elb_man.changed,
-                            elb=elb_man.get_info(),
-                            ansible_facts=ansible_facts)
-
-    module.exit_json(**ec2_facts_result)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/plugins/modules/elb_classic_lb.py b/plugins/modules/elb_classic_lb.py
new file mode 100644
index 00000000000..ffde6deefb2
--- /dev/null
+++ b/plugins/modules/elb_classic_lb.py
@@ -0,0 +1,2156 @@
+#!/usr/bin/python
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: elb_classic_lb
+version_added: 1.0.0
+description:
+  - Creates, updates or destroys an Amazon Elastic Load Balancer (ELB).
+  - This module was renamed from C(ec2_elb_lb) to M(amazon.aws.elb_classic_lb) in
+    version 2.1.0 of the amazon.aws collection.
+short_description: Creates, updates or destroys an Amazon ELB.
+author:
+  - "Jim Dalton (@jsdalton)"
+  - "Mark Chappell (@tremble)"
+options:
+  state:
+    description:
+      - Create or destroy the ELB.
+    type: str
+    choices: [ absent, present ]
+    required: true
+  name:
+    description:
+      - The name of the ELB.
+      - The name of an ELB must be less than 32 characters and unique per-region per-account.
+    type: str
+    required: true
+  listeners:
+    description:
+      - List of ports/protocols for this ELB to listen on (see examples).
+      - Required when I(state=present) and the ELB doesn't exist.
+    type: list
+    elements: dict
+    suboptions:
+      load_balancer_port:
+        description:
+          - The port on which the load balancer will listen.
+        type: int
+        required: True
+      instance_port:
+        description:
+          - The port on which the instance is listening.
+        type: int
+        required: True
+      ssl_certificate_id:
+        description:
+          - The Amazon Resource Name (ARN) of the SSL certificate.
+        type: str
+      protocol:
+        description:
+          - The transport protocol to use for routing.
+          - Valid values are C(HTTP), C(HTTPS), C(TCP), or C(SSL).
+        type: str
+        required: True
+      instance_protocol:
+        description:
+          - The protocol to use for routing traffic to instances.
+          - Valid values are C(HTTP), C(HTTPS), C(TCP), or C(SSL).
+        type: str
+      proxy_protocol:
+        description:
+          - Enable proxy protocol for the listener.
+          - Beware, ELB controls for the proxy protocol are based on the
+            I(instance_port). If you have multiple listeners talking to
+            the same I(instance_port), this will affect all of them.
+        type: bool
+  purge_listeners:
+    description:
+      - Purge existing listeners on ELB that are not found in listeners.
+    type: bool
+    default: true
+  instance_ids:
+    description:
+      - List of instance ids to attach to this ELB.
+    type: list
+    elements: str
+  purge_instance_ids:
+    description:
+      - Purge existing instance ids on ELB that are not found in I(instance_ids).
+    type: bool
+    default: false
+  zones:
+    description:
+      - List of availability zones to enable on this ELB.
+      - Mutually exclusive with I(subnets).
+    type: list
+    elements: str
+  purge_zones:
+    description:
+      - Purge existing availability zones on ELB that are not found in I(zones).
+    type: bool
+    default: false
+  security_group_ids:
+    description:
+      - A list of security groups to apply to the ELB.
+    type: list
+    elements: str
+  security_group_names:
+    description:
+      - A list of security group names to apply to the ELB.
+    type: list
+    elements: str
+  health_check:
+    description:
+      - A dictionary of health check configuration settings (see examples).
+    type: dict
+    suboptions:
+      ping_protocol:
+        description:
+          - The protocol which the ELB health check will use when performing a
+            health check.
+          - Valid values are C('HTTP'), C('HTTPS'), C('TCP') and C('SSL').
+        required: true
+        type: str
+      ping_path:
+        description:
+          - The URI path which the ELB health check will query when performing a
+            health check.
+          - Required when I(ping_protocol=HTTP) or I(ping_protocol=HTTPS).
+        required: false
+        type: str
+      ping_port:
+        description:
+          - The TCP port to which the ELB will connect when performing a
+            health check.
+        required: true
+        type: int
+      interval:
+        description:
+          - The approximate interval, in seconds, between health checks of an individual instance.
+        required: true
+        type: int
+      timeout:
+        description:
+          - The amount of time, in seconds, after which no response means a failed health check.
+        aliases: ['response_timeout']
+        required: true
+        type: int
+      unhealthy_threshold:
+        description:
+          - The number of consecutive health check failures required before moving
+            the instance to the Unhealthy state.
+        required: true
+        type: int
+      healthy_threshold:
+        description:
+          - The number of consecutive health check successes required before moving
+            the instance to the Healthy state.
+        required: true
+        type: int
+  access_logs:
+    description:
+      - A dictionary of access logs configuration settings (see examples).
+    type: dict
+    suboptions:
+      enabled:
+        description:
+          - When set to C(True) will configure delivery of access logs to an S3
+            bucket.
+          - When set to C(False) will disable delivery of access logs.
+        required: false
+        type: bool
+        default: true
+      s3_location:
+        description:
+          - The S3 bucket to deliver access logs to.
+          - See U(https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html)
+            for more information about the necessary S3 bucket policies.
+          - Required when I(enabled=True).
+        required: false
+        type: str
+      s3_prefix:
+        description:
+          - Where in the S3 bucket to deliver the logs.
+          - If the prefix is not provided or set to C(""), the log is placed at the root level of the bucket.
+        required: false
+        type: str
+        default: ""
+      interval:
+        description:
+          - The interval for publishing the access logs to S3.
+        required: false
+        type: int
+        default: 60
+        choices: [ 5, 60 ]
+  subnets:
+    description:
+      - A list of VPC subnets to use when creating the ELB.
+      - Mutually exclusive with I(zones).
+    type: list
+    elements: str
+  purge_subnets:
+    description:
+      - Purge existing subnets on the ELB that are not found in I(subnets).
+      - Because it is not permitted to add multiple subnets from the same
+        availability zone, subnets to be purged will be removed before new
+        subnets are added. 
This may cause a brief outage if you try to replace + all subnets at once. + type: bool + default: false + scheme: + description: + - The scheme to use when creating the ELB. + - For a private VPC-visible ELB use C(internal). + - If you choose to update your scheme with a different value the ELB will be destroyed and + a new ELB created. + - Defaults to I(scheme=internet-facing). + type: str + choices: ["internal", "internet-facing"] + connection_draining_timeout: + description: + - Wait a specified timeout allowing connections to drain before terminating an instance. + - Set to C(0) to disable connection draining. + type: int + idle_timeout: + description: + - ELB connections from clients and to servers are timed out after this amount of time. + type: int + cross_az_load_balancing: + description: + - Distribute load across all configured Availability Zones. + - Defaults to C(false). + type: bool + stickiness: + description: + - A dictionary of stickiness policy settings. + - Policy will be applied to all listeners (see examples). + type: dict + suboptions: + type: + description: + - The type of stickiness policy to apply. + - Required if I(enabled=true). + - Ignored if I(enabled=false). + required: false + type: 'str' + choices: ['application','loadbalancer'] + enabled: + description: + - When I(enabled=false) session stickiness will be disabled for all listeners. + required: false + type: bool + default: true + cookie: + description: + - The name of the application cookie used for stickiness. + - Required if I(enabled=true) and I(type=application). + - Ignored if I(enabled=false). + required: false + type: str + expiration: + description: + - The time period, in seconds, after which the cookie should be considered stale. + - If this parameter is not specified, the stickiness session lasts for the duration of the browser session. + - Ignored if I(enabled=false). + required: false + type: int + wait: + description: + - When creating, deleting, or adding instances to an ELB, if I(wait=true) + Ansible will wait for both the load balancer and related network interfaces + to finish creating/deleting. + - Support for waiting when adding instances was added in release 2.1.0. + type: bool + default: false + wait_timeout: + description: + - Used in conjunction with wait. Number of seconds to wait for the ELB to be terminated. + - A maximum of 600 seconds (10 minutes) is allowed. + type: int + default: 180 + tags: + description: + - A dictionary of tags to apply to the ELB. + - To delete all tags supply an empty dict (C({})) and set + I(purge_tags=true). + type: dict + purge_tags: + description: + - Whether to remove existing tags that aren't passed in the I(tags) parameter. + type: bool + default: true + version_added: 2.1.0 + +extends_documentation_fragment: +- amazon.aws.aws +- amazon.aws.ec2 +''' + +EXAMPLES = """ +# Note: None of these examples set aws_access_key, aws_secret_key, or region. +# It is assumed that their matching environment variables are set. 
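+
+# A minimal sketch (illustrative values) of the wait support added alongside
+# the boto3 migration: with wait enabled, the task blocks until the new load
+# balancer has finished creating.
+- amazon.aws.elb_classic_lb:
+    name: "test-create-with-wait"
+    state: present
+    wait: yes
+    wait_timeout: 300
+    zones:
+      - us-east-1a
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80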
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+# Basic provisioning example (non-VPC)
+
+- amazon.aws.elb_classic_lb:
+    name: "test-please-delete"
+    state: present
+    zones:
+      - us-east-1a
+      - us-east-1d
+    listeners:
+      - protocol: http # options are http, https, ssl, tcp
+        load_balancer_port: 80
+        instance_port: 80
+        proxy_protocol: True
+      - protocol: https
+        load_balancer_port: 443
+        instance_protocol: http # optional, defaults to value of protocol setting
+        instance_port: 80
+        # ssl certificate required for https or ssl
+        ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
+
+# Internal ELB example
+
+- amazon.aws.elb_classic_lb:
+    name: "test-vpc"
+    scheme: internal
+    state: present
+    instance_ids:
+      - i-abcd1234
+    purge_instance_ids: true
+    subnets:
+      - subnet-abcd1234
+      - subnet-1a2b3c4d
+    listeners:
+      - protocol: http # options are http, https, ssl, tcp
+        load_balancer_port: 80
+        instance_port: 80
+
+# Configure a health check and the access logs
+- amazon.aws.elb_classic_lb:
+    name: "test-please-delete"
+    state: present
+    zones:
+      - us-east-1d
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+    health_check:
+      ping_protocol: http # options are http, https, ssl, tcp
+      ping_port: 80
+      ping_path: "/index.html" # not required for tcp or ssl
+      response_timeout: 5 # seconds
+      interval: 30 # seconds
+      unhealthy_threshold: 2
+      healthy_threshold: 10
+    access_logs:
+      interval: 5 # minutes (defaults to 60)
+      s3_location: "my-bucket" # This value is required if access_logs is set
+      s3_prefix: "logs"
+
+# Ensure ELB is gone
+- amazon.aws.elb_classic_lb:
+    name: "test-please-delete"
+    state: absent
+
+# Ensure ELB is gone and wait for the deletion to complete (using the default timeout)
+- amazon.aws.elb_classic_lb:
+    name: "test-please-delete"
+    state: absent
+    wait: yes
+
+# Ensure ELB is gone and wait for the deletion to complete, with an explicit timeout
+- amazon.aws.elb_classic_lb:
+    name: "test-please-delete"
+    state: absent
+    wait: yes
+    wait_timeout: 600
+
+# Normally, this module will purge any listeners that exist on the ELB
+# but aren't specified in the listeners parameter. If purge_listeners is
+# false it leaves them alone
+- amazon.aws.elb_classic_lb:
+    name: "test-please-delete"
+    state: present
+    zones:
+      - us-east-1a
+      - us-east-1d
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+    purge_listeners: no
+
+# Normally, this module will leave availability zones that are enabled
+# on the ELB alone. If purge_zones is true, then any extraneous zones
+# will be removed
+- amazon.aws.elb_classic_lb:
+    name: "test-please-delete"
+    state: present
+    zones:
+      - us-east-1a
+      - us-east-1d
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+    purge_zones: yes
+
+# Create an ELB and assign a list of subnets to it.
+- amazon.aws.elb_classic_lb:
+    state: present
+    name: 'New ELB'
+    security_group_ids: 'sg-123456,sg-67890'
+    subnets: 'subnet-123456,subnet-67890'
+    purge_subnets: yes
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+
+# Create an ELB with connection draining, increased idle timeout and cross availability
+# zone load balancing
+- amazon.aws.elb_classic_lb:
+    name: "New ELB"
+    state: present
+    connection_draining_timeout: 60
+    idle_timeout: 300
+    cross_az_load_balancing: "yes"
+    zones:
+      - us-east-1a
+      - us-east-1d
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+
+# Create an ELB with load balancer stickiness enabled
+- amazon.aws.elb_classic_lb:
+    name: "New ELB"
+    state: present
+    zones:
+      - us-east-1a
+      - us-east-1d
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+    stickiness:
+      type: loadbalancer
+      enabled: yes
+      expiration: 300
+
+# Create an ELB with application stickiness enabled
+- amazon.aws.elb_classic_lb:
+    name: "New ELB"
+    state: present
+    zones:
+      - us-east-1a
+      - us-east-1d
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+    stickiness:
+      type: application
+      enabled: yes
+      cookie: SESSIONID
+
+# Create an ELB and add tags
+- amazon.aws.elb_classic_lb:
+    name: "New ELB"
+    state: present
+    zones:
+      - us-east-1a
+      - us-east-1d
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+    tags:
+      Name: "New ELB"
+      stack: "production"
+      client: "Bob"
+
+# Delete all tags from an ELB
+- amazon.aws.elb_classic_lb:
+    name: "New ELB"
+    state: present
+    zones:
+      - us-east-1a
+      - us-east-1d
+    listeners:
+      - protocol: http
+        load_balancer_port: 80
+        instance_port: 80
+    tags: {}
+"""
+
+RETURN = '''
+elb:
+  description: Load Balancer attributes
+  returned: always
+  type: dict
+  contains:
+    app_cookie_policy:
+      description: The name of the policy used to control if the ELB is using an application cookie stickiness policy.
+      type: str
+      sample: ec2-elb-lb-AppCookieStickinessPolicyType
+      returned: when state is not 'absent'
+    backends:
+      description: A description of the backend policy applied to the ELB (instance-port:policy-name).
+      type: str
+      sample: 8181:ProxyProtocol-policy
+      returned: when state is not 'absent'
+    connection_draining_timeout:
+      description: The maximum time, in seconds, to keep the existing connections open before deregistering the instances.
+      type: int
+      sample: 25
+      returned: when state is not 'absent'
+    cross_az_load_balancing:
+      description: Either C('yes') if cross-AZ load balancing is enabled, or C('no') if cross-AZ load balancing is disabled.
+      type: str
+      sample: 'yes'
+      returned: when state is not 'absent'
+    dns_name:
+      description: The DNS name of the ELB.
+      type: str
+      sample: internal-ansible-test-935c585850ac-1516306744.us-east-1.elb.amazonaws.com
+      returned: when state is not 'absent'
+    health_check:
+      description: A dictionary describing the health check used for the ELB.
+      type: dict
+      returned: when state is not 'absent'
+      contains:
+        healthy_threshold:
+          description: The number of consecutive successful health checks before marking an instance as healthy.
+          type: int
+          sample: 2
+        interval:
+          description: The time, in seconds, between each health check.
+          type: int
+          sample: 10
+        target:
+          description: The Protocol, Port, and for HTTP(S) health checks the path tested by the health check.
+          type: str
+          sample: TCP:22
+        timeout:
+          description: The time, in seconds, after which an in-progress health check is considered failed due to a timeout.
+          type: int
+          sample: 5
+        unhealthy_threshold:
+          description: The number of consecutive failed health checks before marking an instance as unhealthy.
+          type: int
+          sample: 2
+    hosted_zone_id:
+      description: The ID of the Amazon Route 53 hosted zone for the load balancer.
+      type: str
+      sample: Z35SXDOTRQ7X7K
+      returned: when state is not 'absent'
+    hosted_zone_name:
+      description: The DNS name of the load balancer when using a custom hostname.
+      type: str
+      sample: 'ansible-module.example'
+      returned: when state is not 'absent'
+    idle_timeout:
+      description: The length of time before an idle connection is dropped by the ELB.
+      type: int
+      sample: 50
+      returned: when state is not 'absent'
+    in_service_count:
+      description: The number of instances attached to the ELB in an in-service state.
+      type: int
+      sample: 1
+      returned: when state is not 'absent'
+    instance_health:
+      description: A list of dictionaries describing the health of each instance attached to the ELB.
+      type: list
+      elements: dict
+      returned: when state is not 'absent'
+      contains:
+        description:
+          description: A human readable description of why the instance is not in service.
+          type: str
+          sample: N/A
+          returned: when state is not 'absent'
+        instance_id:
+          description: The ID of the instance.
+          type: str
+          sample: i-03dcc8953a03d6435
+          returned: when state is not 'absent'
+        reason_code:
+          description: A code describing why the instance is not in service.
+          type: str
+          sample: N/A
+          returned: when state is not 'absent'
+        state:
+          description: The current service state of the instance.
+          type: str
+          sample: InService
+          returned: when state is not 'absent'
+    instances:
+      description: A list of the IDs of instances attached to the ELB.
+      type: list
+      elements: str
+      sample: ['i-03dcc8953a03d6435']
+      returned: when state is not 'absent'
+    lb_cookie_policy:
+      description: The name of the policy used to control if the ELB is using a cookie stickiness policy.
+      type: str
+      sample: ec2-elb-lb-LBCookieStickinessPolicyType
+      returned: when state is not 'absent'
+    listeners:
+      description:
+        - A list of lists describing the listeners attached to the ELB.
+        - The nested list contains the listener port, the instance port, the listener protocol, the instance protocol,
+          and where appropriate the ID of the SSL certificate for the port.
+      type: list
+      elements: list
+      sample: [[22, 22, 'TCP', 'TCP'], [80, 8181, 'HTTP', 'HTTP']]
+      returned: when state is not 'absent'
+    name:
+      description: The name of the ELB.  This name is unique per-region, per-account.
+      type: str
+      sample: ansible-test-935c585850ac
+      returned: when state is not 'absent'
+    out_of_service_count:
+      description: The number of instances attached to the ELB in an out-of-service state.
+      type: int
+      sample: 0
+      returned: when state is not 'absent'
+    proxy_policy:
+      description: The name of the policy used to control if the ELB operates using the Proxy protocol.
+      type: str
+      sample: ProxyProtocol-policy
+      returned: when the proxy protocol policy exists.
+    region:
+      description: The AWS region in which the ELB is running.
+      type: str
+      sample: us-east-1
+      returned: always
+    scheme:
+      description: Whether the ELB is an C('internal') or a C('internet-facing') load balancer.
+      type: str
+      sample: internal
+      returned: when state is not 'absent'
+    security_group_ids:
+      description: A list of the IDs of the Security Groups attached to the ELB.
+      type: list
+      elements: str
+      sample: ['sg-0c12ebd82f2fb97dc', 'sg-01ec7378d0c7342e6']
+      returned: when state is not 'absent'
+    status:
+      description: A minimal description of the current state of the ELB.  Valid values are C('exists'), C('gone'), C('deleted'), C('created').
+      type: str
+      sample: exists
+      returned: always
+    subnets:
+      description: A list of the subnet IDs attached to the ELB.
+      type: list
+      elements: str
+      sample: ['subnet-00d9d0f70c7e5f63c', 'subnet-03fa5253586b2d2d5']
+      returned: when state is not 'absent'
+    tags:
+      description: A dictionary describing the tags attached to the ELB.
+      type: dict
+      sample: {'Name': 'ansible-test-935c585850ac', 'ExampleTag': 'Example Value'}
+      returned: when state is not 'absent'
+    unknown_instance_state_count:
+      description: The number of instances attached to the ELB in an unknown state.
+      type: int
+      sample: 0
+      returned: when state is not 'absent'
+    zones:
+      description: A list of the availability zones in which the ELB is running.
+      type: list
+      elements: str
+      sample: ['us-east-1b', 'us-east-1a']
+      returned: when state is not 'absent'
+'''
+
+try:
+    import botocore
+except ImportError:
+    pass  # Taken care of by AnsibleAWSModule
+
+from ..module_utils.core import AnsibleAWSModule
+from ..module_utils.core import is_boto3_error_code
+from ..module_utils.core import scrub_none_parameters
+from ..module_utils.ec2 import AWSRetry
+from ..module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ..module_utils.ec2 import ansible_dict_to_boto3_tag_list
+from ..module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ..module_utils.ec2 import camel_dict_to_snake_dict
+from ..module_utils.ec2 import compare_aws_tags
+from ..module_utils.ec2 import snake_dict_to_camel_dict
+
+from ..module_utils.ec2 import get_ec2_security_group_ids_from_names
+from ..module_utils.waiters import get_waiter
+
+
+class ElbManager(object):
+    """Handles ELB creation and destruction"""
+
+    def __init__(self, module):
+
+        self.module = module
+
+        self.name = module.params['name']
+        self.listeners = module.params['listeners']
+        self.purge_listeners = module.params['purge_listeners']
+        self.instance_ids = module.params['instance_ids']
+        self.purge_instance_ids = module.params['purge_instance_ids']
+        self.zones = module.params['zones']
+        self.purge_zones = module.params['purge_zones']
+        self.health_check = module.params['health_check']
+        self.access_logs = module.params['access_logs']
+        self.subnets = module.params['subnets']
+        self.purge_subnets = module.params['purge_subnets']
+        self.scheme = module.params['scheme']
+        self.connection_draining_timeout = module.params['connection_draining_timeout']
+        self.idle_timeout = module.params['idle_timeout']
+        self.cross_az_load_balancing = module.params['cross_az_load_balancing']
+        self.stickiness = module.params['stickiness']
+        self.wait = module.params['wait']
+        self.wait_timeout = module.params['wait_timeout']
+        self.tags = module.params['tags']
+        self.purge_tags = module.params['purge_tags']
+
+        self.changed = False
+        self.status = 'gone'
+
+        retry_decorator = AWSRetry.jittered_backoff()
+        self.client = self.module.client('elb', retry_decorator=retry_decorator)
+        self.ec2_client = self.module.client('ec2', retry_decorator=retry_decorator)
+
+        security_group_names = module.params['security_group_names']
+        self.security_group_ids = module.params['security_group_ids']
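An aside on the two retry-decorated clients created above: `AWSRetry.jittered_backoff()` wraps boto3 calls so that individual operations can opt in to retries with `aws_retry=True`. The simplified decorator below is illustrative only — `TransientError` is a stand-in exception, not a real botocore class — and sketches the full-jitter backoff pattern the real helper applies to throttling-style AWS errors.

```python
# Illustrative sketch of a jittered-backoff retry decorator (not the real AWSRetry).
import functools
import random
import time


class TransientError(Exception):
    """Stand-in for a throttling error such as RequestLimitExceeded."""


def jittered_backoff(retries=5, base_delay=1.0, max_delay=30.0):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(retries):
                try:
                    return func(*args, **kwargs)
                except TransientError:
                    if attempt == retries - 1:
                        raise
                    # Full jitter: sleep a random amount up to the capped backoff
                    time.sleep(random.uniform(0, min(max_delay, base_delay * 2 ** attempt)))
        return wrapper
    return decorator
```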
+
+        self._update_descriptions()
+
+        if security_group_names:
+            # Use the subnets attached to the VPC to find which VPC we're in and
+            # limit the search
+            if self.elb and self.elb.get('Subnets'):
+                subnets = set(self.elb.get('Subnets') + list(self.subnets or []))
+            else:
+                subnets = set(self.subnets or [])
+            if subnets:
+                vpc_id = self._get_vpc_from_subnets(subnets)
+            else:
+                vpc_id = None
+            try:
+                self.security_group_ids = self._get_ec2_security_group_ids_from_names(
+                    sec_group_list=security_group_names, vpc_id=vpc_id)
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                module.fail_json_aws(e, msg="Failed to convert security group names to IDs, try using security group IDs rather than names")
+
+    def _update_descriptions(self):
+        try:
+            self.elb = self._get_elb()
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            self.module.fail_json_aws(e, msg='Unable to describe load balancer')
+        try:
+            self.elb_attributes = self._get_elb_attributes()
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes')
+        try:
+            self.elb_policies = self._get_elb_policies()
+        except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+            self.module.fail_json_aws(e, msg='Unable to describe load balancer policies')
+        try:
+            self.elb_health = self._get_elb_instance_health()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg='Unable to describe load balancer instance health')
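`_update_descriptions()` leans on the collection's `is_boto3_error_code()` helper to treat "not found" as an empty result rather than a failure. The hedged sketch below shows the equivalent logic in plain botocore terms; `client` and `name` are hypothetical stand-ins.

```python
# What is_boto3_error_code('LoadBalancerNotFound') automates, spelled out by hand.
import botocore.exceptions


def describe_or_none(client, name):
    try:
        return client.describe_load_balancers(LoadBalancerNames=[name])
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'LoadBalancerNotFound':
            return None  # absent ELB is an expected state, not an error
        raise
```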
+
+    # We have a number of complex parameters which can't be validated by
+    # AnsibleModule or are only required if the ELB doesn't exist.
+    def validate_params(self, state=None):
+        problem_found = False
+        # Validate that protocol is one of the permitted values
+        problem_found |= self._validate_listeners(self.listeners)
+        problem_found |= self._validate_health_check(self.health_check)
+        problem_found |= self._validate_stickiness(self.stickiness)
+        if state == 'present':
+            # When creating a new ELB
+            problem_found |= self._validate_creation_requirements()
+            problem_found |= self._validate_access_logs(self.access_logs)
+
+    # Pass check_mode down through to the module
+    @property
+    def check_mode(self):
+        return self.module.check_mode
+
+    def _get_elb_policies(self):
+        try:
+            attributes = self.client.describe_load_balancer_policies(LoadBalancerName=self.name)
+        except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']):
+            return {}
+        except is_boto3_error_code('AccessDenied'):  # pylint: disable=duplicate-except
+            # Be forgiving if we can't see the policies
+            # Note: This will break idempotency if the caller has permission to
+            # set the policies but not to describe them
+            self.module.warn('Access Denied trying to describe load balancer policies')
+            return {}
+        return attributes['PolicyDescriptions']
+
+    def _get_elb_instance_health(self):
+        try:
+            instance_health = self.client.describe_instance_health(LoadBalancerName=self.name)
+        except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']):
+            return []
+        except is_boto3_error_code('AccessDenied'):  # pylint: disable=duplicate-except
+            # Be forgiving if we can't see the instance health
+            # Note: This will break idempotency if the caller has permission to
+            # register instances but not to describe their health
+            self.module.warn('Access Denied trying to describe instance health')
+            return []
+        return instance_health['InstanceStates']
+
+    def _get_elb_attributes(self):
+        try:
+            attributes = self.client.describe_load_balancer_attributes(LoadBalancerName=self.name)
+        except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']):
+            return {}
+        except is_boto3_error_code('AccessDenied'):  # pylint: disable=duplicate-except
+            # Be forgiving if we can't see the attributes
+            # Note: This will break idempotency if the caller has permission to
+            # set the attributes but not to describe them
+            self.module.warn('Access Denied trying to describe load balancer attributes')
+            return {}
+        return attributes['LoadBalancerAttributes']
+
+    def _get_elb(self):
+        try:
+            elbs = self._describe_loadbalancer(self.name)
+        except is_boto3_error_code('LoadBalancerNotFound'):
+            return None
+
+        # Shouldn't happen, but Amazon could change the rules on us...
+        if len(elbs) > 1:
+            self.module.fail_json(msg='Found multiple ELBs with name {0}'.format(self.name))
+
+        self.status = 'exists' if self.status == 'gone' else self.status
+
+        return elbs[0]
+
+    def _delete_elb(self):
+        # True if succeeds, exception raised if not
+        try:
+            if not self.check_mode:
+                self.client.delete_load_balancer(aws_retry=True, LoadBalancerName=self.name)
+            self.changed = True
+            self.status = 'deleted'
+        except is_boto3_error_code('LoadBalancerNotFound'):
+            return False
+        return True
+
+    def _create_elb(self):
+        listeners = list(self._format_listener(l) for l in self.listeners)
+        if not self.scheme:
+            self.scheme = 'internet-facing'
+        params = dict(
+            LoadBalancerName=self.name,
+            AvailabilityZones=self.zones,
+            SecurityGroups=self.security_group_ids,
+            Subnets=self.subnets,
+            Listeners=listeners,
+            Scheme=self.scheme)
+        params = scrub_none_parameters(params)
+        if self.tags:
+            params['Tags'] = ansible_dict_to_boto3_tag_list(self.tags)
+
+        if not self.check_mode:
+            self.client.create_load_balancer(aws_retry=True, **params)
+            # create_load_balancer only returns the DNS name
+            self.elb = self._get_elb()
+        self.changed = True
+        self.status = 'created'
+        return True
+
+    def _format_listener(self, listener, inject_protocol=False):
+        """Formats listener into the format needed by the
+        ELB API"""
+
+        listener = scrub_none_parameters(listener)
+
+        for protocol in ['protocol', 'instance_protocol']:
+            if protocol in listener:
+                listener[protocol] = listener[protocol].upper()
+
+        if inject_protocol and 'instance_protocol' not in listener:
+            listener['instance_protocol'] = listener['protocol']
+
+        # Remove proxy_protocol, it has to be handled as a policy
+        listener.pop('proxy_protocol', None)
+
+        ssl_id = listener.pop('ssl_certificate_id', None)
+
+        formatted_listener = snake_dict_to_camel_dict(listener, True)
+        if ssl_id:
+            formatted_listener['SSLCertificateId'] = ssl_id
+
+        return formatted_listener
+
+    def _format_healthcheck_target(self):
+        """Compose target string from healthcheck parameters"""
+        protocol = self.health_check['ping_protocol'].upper()
+        path = ""
+
+        if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
+            path = self.health_check['ping_path']
+
+        return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
+
+    def _format_healthcheck(self):
+        return dict(
+            Target=self._format_healthcheck_target(),
+            Timeout=self.health_check['timeout'],
+            Interval=self.health_check['interval'],
+            UnhealthyThreshold=self.health_check['unhealthy_threshold'],
+            HealthyThreshold=self.health_check['healthy_threshold'],
+        )
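The Classic ELB API packs the whole health check endpoint into a single `Target` string, which is what `_format_healthcheck_target()` above builds from the `health_check` suboptions. A standalone sketch of the same composition:

```python
# Plain-Python illustration of the Target string format (runnable as-is).
def healthcheck_target(ping_protocol, ping_port, ping_path=None):
    protocol = ping_protocol.upper()
    # The path component is only meaningful for HTTP(S) checks
    path = ping_path if protocol in ('HTTP', 'HTTPS') and ping_path else ''
    return '%s:%s%s' % (protocol, ping_port, path)


print(healthcheck_target('http', 80, '/index.html'))  # -> HTTP:80/index.html
print(healthcheck_target('tcp', 22))                  # -> TCP:22
```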
+
+    def ensure_ok(self):
+        """Create the ELB"""
+        if not self.elb:
+            try:
+                self._create_elb()
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                self.module.fail_json_aws(e, msg="Failed to create load balancer")
+            try:
+                self.elb_attributes = self._get_elb_attributes()
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes')
+            self._wait_created()
+
+        # Some attributes are configured on creation, others need to be updated
+        # after creation.  Skip updates for those set on creation
+        else:
+            if self._check_scheme():
+                # XXX We should probably set 'None' parameters based on the
+                # current state prior to deletion
+
+                # the only way to change the scheme is by recreating the resource
+                self.ensure_gone()
+                # We need to wait for it to be gone-gone
+                self._wait_gone(True)
+                try:
+                    self._create_elb()
+                except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                    self.module.fail_json_aws(e, msg="Failed to recreate load balancer")
+                try:
+                    self.elb_attributes = self._get_elb_attributes()
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes')
+            else:
+                self._set_subnets()
+                self._set_zones()
+                self._set_security_groups()
+                self._set_elb_listeners()
+                self._set_tags()
+
+        self._set_health_check()
+        self._set_elb_attributes()
+        self._set_backend_policies()
+        self._set_stickiness_policies()
+        self._set_instance_ids()
+
+    def ensure_gone(self):
+        """Destroy the ELB"""
+        if self.elb:
+            try:
+                self._delete_elb()
+            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+                self.module.fail_json_aws(e, msg="Failed to delete load balancer")
+            self._wait_gone()
+
+    def _wait_gone(self, wait=None):
+        if not wait and not self.wait:
+            return
+        try:
+            self._wait_for_elb_removed()
+            # Unfortunately even though the ELB itself is removed quickly
+            # the interfaces take longer, so dependent security groups cannot
+            # be deleted until the interface has registered as removed.
+            self._wait_for_elb_interface_removed()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed while waiting for load balancer deletion")
+
+    def _wait_created(self, wait=False):
+        if not wait and not self.wait:
+            return
+        try:
+            self._wait_for_elb_created()
+            # Can take longer than creation
+            self._wait_for_elb_interface_created()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed while waiting for load balancer creation")
+
+    def get_load_balancer(self):
+        self._update_descriptions()
+        elb = dict(self.elb or {})
+        if not elb:
+            return {}
+
+        elb['LoadBalancerAttributes'] = self.elb_attributes
+        elb['LoadBalancerPolicies'] = self.elb_policies
+        load_balancer = camel_dict_to_snake_dict(elb)
+        try:
+            load_balancer['tags'] = self._get_tags()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to get load balancer tags")
+
+        return load_balancer
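`_wait_gone()` waits not only for the load balancer itself but also for its network interfaces, since security groups referenced by those ENIs cannot be deleted while they linger. The module does this with the custom `network_interface_deleted` waiter added to waiters.py in this PR; the plain-boto3 polling sketch below shows the same idea (region and timeout are illustrative assumptions).

```python
# Hedged sketch of the post-deletion ENI wait, using plain boto3 polling.
import time

import boto3


def wait_for_elb_enis_gone(elb_name, region, timeout=300):
    ec2 = boto3.client('ec2', region_name=region)
    # Same filters the module uses to find interfaces owned by the ELB service
    filters = [
        {'Name': 'requester-id', 'Values': ['amazon-elb']},
        {'Name': 'description', 'Values': ['ELB {0}'.format(elb_name)]},
    ]
    deadline = time.time() + timeout
    while time.time() < deadline:
        enis = ec2.describe_network_interfaces(Filters=filters)['NetworkInterfaces']
        if not enis:
            return True
        time.sleep(5)
    raise TimeoutError('ENIs for ELB {0} still present'.format(elb_name))
```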
+
+    def get_info(self):
+        self._update_descriptions()
+
+        if not self.elb:
+            return dict(
+                name=self.name,
+                status=self.status,
+                region=self.module.region
+            )
+        check_elb = dict(self.elb)
+        check_elb_attrs = dict(self.elb_attributes or {})
+        check_policies = check_elb.get('Policies', {})
+        try:
+            lb_cookie_policy = check_policies['LBCookieStickinessPolicies'][0]['PolicyName']
+        except (KeyError, IndexError):
+            lb_cookie_policy = None
+        try:
+            app_cookie_policy = check_policies['AppCookieStickinessPolicies'][0]['PolicyName']
+        except (KeyError, IndexError):
+            app_cookie_policy = None
+
+        health_check = camel_dict_to_snake_dict(check_elb.get('HealthCheck', {}))
+
+        backend_policies = list()
+        for port, policies in self._get_backend_policies().items():
+            for policy in policies:
+                backend_policies.append("{0}:{1}".format(port, policy))
+
+        info = dict(
+            name=check_elb.get('LoadBalancerName'),
+            dns_name=check_elb.get('DNSName'),
+            zones=check_elb.get('AvailabilityZones'),
+            security_group_ids=check_elb.get('SecurityGroups'),
+            status=self.status,
+            subnets=check_elb.get('Subnets'),
+            scheme=check_elb.get('Scheme'),
+            hosted_zone_name=check_elb.get('CanonicalHostedZoneName'),
+            hosted_zone_id=check_elb.get('CanonicalHostedZoneNameID'),
+            lb_cookie_policy=lb_cookie_policy,
+            app_cookie_policy=app_cookie_policy,
+            proxy_policy=self._get_proxy_protocol_policy(),
+            backends=backend_policies,
+            instances=self._get_instance_ids(),
+            out_of_service_count=0,
+            in_service_count=0,
+            unknown_instance_state_count=0,
+            region=self.module.region,
+            health_check=health_check,
+        )
+
+        instance_health = camel_dict_to_snake_dict(dict(InstanceHealth=self.elb_health))
+        info.update(instance_health)
+
+        # instance state counts: InService or OutOfService
+        if info['instance_health']:
+            for instance_state in info['instance_health']:
+                if instance_state['state'] == "InService":
+                    info['in_service_count'] += 1
+                elif instance_state['state'] == "OutOfService":
+                    info['out_of_service_count'] += 1
+                else:
+                    info['unknown_instance_state_count'] += 1
+
+        listeners = check_elb.get('ListenerDescriptions', [])
+        if listeners:
+            info['listeners'] = list(
+                self._api_listener_as_tuple(l['Listener']) for l in listeners
+            )
+        else:
+            info['listeners'] = []
+
+        try:
+            info['connection_draining_timeout'] = check_elb_attrs['ConnectionDraining']['Timeout']
+        except KeyError:
+            pass
+        try:
+            info['idle_timeout'] = check_elb_attrs['ConnectionSettings']['IdleTimeout']
+        except KeyError:
+            pass
+        try:
+            is_enabled = check_elb_attrs['CrossZoneLoadBalancing']['Enabled']
+            info['cross_az_load_balancing'] = 'yes' if is_enabled else 'no'
+        except KeyError:
+            pass
+
+        # TODO: return stickiness info?
+
+        try:
+            info['tags'] = self._get_tags()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to get load balancer tags")
+
+        return info
+
+    @property
+    def _waiter_config(self):
+        delay = min(10, self.wait_timeout)
+        max_attempts = (self.wait_timeout // delay)
+        return {'Delay': delay, 'MaxAttempts': max_attempts}
+
+    def _wait_for_elb_created(self):
+        if self.check_mode:
+            return True
+
+        waiter = get_waiter(self.client, 'load_balancer_created')
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                LoadBalancerNames=[self.name],
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB creation')
+
+        return True
+
+    def _wait_for_elb_interface_created(self):
+        if self.check_mode:
+            return True
+        waiter = get_waiter(self.ec2_client, 'network_interface_available')
+
+        filters = ansible_dict_to_boto3_filter_list(
+            {'requester-id': 'amazon-elb',
+             'description': 'ELB {0}'.format(self.name)}
+        )
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                Filters=filters,
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface creation')
+
+        return True
+
+    def _wait_for_elb_removed(self):
+        if self.check_mode:
+            return True
+
+        waiter = get_waiter(self.client, 'load_balancer_deleted')
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                LoadBalancerNames=[self.name],
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB removal')
+
+        return True
+
+    def _wait_for_elb_interface_removed(self):
+        if self.check_mode:
+            return True
+
+        waiter = get_waiter(self.ec2_client, 'network_interface_deleted')
+
+        filters = ansible_dict_to_boto3_filter_list(
+            {'requester-id': 'amazon-elb',
+             'description': 'ELB {0}'.format(self.name)}
+        )
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                Filters=filters,
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface removal')
+
+        return True
+
+    def _wait_for_instance_state(self, waiter_name, instances):
+        if not instances:
+            return False
+
+        if self.check_mode:
+            return True
+
+        waiter = get_waiter(self.client, waiter_name)
+
+        instance_list = list(dict(InstanceId=instance) for instance in instances)
+
+        try:
+            waiter.wait(
+                WaiterConfig=self._waiter_config,
+                LoadBalancerName=self.name,
+                Instances=instance_list,
+            )
+        except botocore.exceptions.WaiterError as e:
+            self.module.fail_json_aws(e, 'Timeout waiting for ELB Instance State')
+
+        return True
+
+    def _create_elb_listeners(self, listeners):
+        """Takes a list of listener definitions and creates them"""
+        if not listeners:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        self.client.create_load_balancer_listeners(
+            aws_retry=True,
+            LoadBalancerName=self.name,
+            Listeners=listeners,
+        )
+        return True
+
+    def _delete_elb_listeners(self, ports):
+        """Takes a list of listener ports and deletes them from the ELB"""
+        if not ports:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        self.client.delete_load_balancer_listeners(
+            aws_retry=True,
+            LoadBalancerName=self.name,
+            LoadBalancerPorts=ports,
+        )
+        return True
+
+    def _set_elb_listeners(self):
+        """
+        Creates listeners specified by self.listeners; overwrites existing
+        listeners on these ports; removes extraneous listeners
+        """
+
+        if not self.listeners:
+            return False
+
+        # We can't use sets here: dicts aren't hashable, so convert to the boto3
+        # format and use a generator to filter
+        new_listeners = list(self._format_listener(l, True) for l in self.listeners)
+        existing_listeners = list(l['Listener'] for l in self.elb['ListenerDescriptions'])
+        listeners_to_remove = list(l for l in existing_listeners if l not in new_listeners)
+        listeners_to_add = list(l for l in new_listeners if l not in existing_listeners)
+
+        changed = False
+
+        if self.purge_listeners:
+            ports_to_remove = list(l['LoadBalancerPort'] for l in listeners_to_remove)
+        else:
+            old_ports = set(l['LoadBalancerPort'] for l in listeners_to_remove)
+            new_ports = set(l['LoadBalancerPort'] for l in listeners_to_add)
+            # If we're not purging, then we need to remove Listeners
+            # where the full definition doesn't match, but the port does
+            ports_to_remove = list(old_ports & new_ports)
+
+        # Update is a delete then add, so do the deletion first
+        try:
+            changed |= self._delete_elb_listeners(ports_to_remove)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to remove listeners from load balancer")
+        try:
+            changed |= self._create_elb_listeners(listeners_to_add)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to add listeners to load balancer")
+
+        return changed
+
+    def _api_listener_as_tuple(self, listener):
+        """Adds ssl_certificate_id to ELB API tuple if present"""
+        base_tuple = [
+            listener.get('LoadBalancerPort'),
+            listener.get('InstancePort'),
+            listener.get('Protocol'),
+            listener.get('InstanceProtocol'),
+        ]
+        if listener.get('SSLCertificateId', False):
+            base_tuple.append(listener.get('SSLCertificateId'))
+        return tuple(base_tuple)
+
+    def _attach_subnets(self, subnets):
+        if not subnets:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+        self.client.attach_load_balancer_to_subnets(
+            aws_retry=True,
+            LoadBalancerName=self.name,
+            Subnets=subnets)
+        return True
+
+    def _detach_subnets(self, subnets):
+        if not subnets:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+        self.client.detach_load_balancer_from_subnets(
+            aws_retry=True,
+            LoadBalancerName=self.name,
+            Subnets=subnets)
+        return True
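The listener reconciliation above compares full listener definitions, not just ports. The toy example below (plain Python, made-up listener data) shows the non-purging case: a listener is still deleted and recreated when its port is reused with a different definition, because listener ports must be unique, while unrelated listeners are left alone.

```python
# Runnable toy illustration of the non-purging port-collision rule.
existing = [
    {'LoadBalancerPort': 80, 'InstancePort': 80, 'Protocol': 'HTTP', 'InstanceProtocol': 'HTTP'},
    {'LoadBalancerPort': 443, 'InstancePort': 80, 'Protocol': 'HTTPS', 'InstanceProtocol': 'HTTP'},
]
desired = [
    {'LoadBalancerPort': 80, 'InstancePort': 8080, 'Protocol': 'HTTP', 'InstanceProtocol': 'HTTP'},
]

to_remove = [l for l in existing if l not in desired]
to_add = [l for l in desired if l not in existing]

# purge_listeners=False: only delete where a new definition collides with an old port
ports_to_remove = list({l['LoadBalancerPort'] for l in to_remove}
                       & {l['LoadBalancerPort'] for l in to_add})
print(ports_to_remove)  # -> [80]; the listener on port 443 is left alone
```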
+
+    def _set_subnets(self):
+        """Determine which subnets need to be attached or detached on the ELB"""
+        # Subnets parameter not set, nothing to change
+        if self.subnets is None:
+            return False
+
+        changed = False
+
+        if self.purge_subnets:
+            subnets_to_detach = list(set(self.elb['Subnets']) - set(self.subnets))
+        else:
+            subnets_to_detach = list()
+        subnets_to_attach = list(set(self.subnets) - set(self.elb['Subnets']))
+
+        # You can't add multiple subnets from the same AZ.  Remove first, then
+        # add.
+        try:
+            changed |= self._detach_subnets(subnets_to_detach)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to detach subnets from load balancer")
+        try:
+            changed |= self._attach_subnets(subnets_to_attach)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to attach subnets to load balancer")
+
+        return changed
+
+    def _check_scheme(self):
+        """Determine if the requested scheme is different from the scheme of the ELB"""
+        if self.scheme:
+            if self.elb['Scheme'] != self.scheme:
+                return True
+        return False
+
+    def _enable_zones(self, zones):
+        if not zones:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.enable_availability_zones_for_load_balancer(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                AvailabilityZones=zones,
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg='Failed to enable zones for load balancer')
+        return True
+
+    def _disable_zones(self, zones):
+        if not zones:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.disable_availability_zones_for_load_balancer(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                AvailabilityZones=zones,
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg='Failed to disable zones for load balancer')
+        return True
+
+    def _set_zones(self):
+        """Determine which zones need to be enabled or disabled on the ELB"""
+        # zones parameter not set, nothing to change
+        if self.zones is None:
+            return False
+
+        changed = False
+
+        if self.purge_zones:
+            zones_to_disable = list(set(self.elb['AvailabilityZones']) - set(self.zones))
+        else:
+            zones_to_disable = list()
+        zones_to_enable = list(set(self.zones) - set(self.elb['AvailabilityZones']))
+
+        # Add before we remove to reduce the chance of an outage if someone
+        # replaces all zones at once
+        try:
+            changed |= self._enable_zones(zones_to_enable)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to enable zone on load balancer")
+        try:
+            changed |= self._disable_zones(zones_to_disable)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to disable zone on load balancer")
+
+        return changed
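Note the deliberate asymmetry with `_set_subnets()`: zones are enabled before extra ones are disabled so capacity never drops to zero, whereas subnets must be detached first because two subnets from the same AZ cannot be attached at once. A toy illustration of the zone set arithmetic:

```python
# Runnable toy example of the zone diff and the enable-before-disable ordering.
current_zones = {'us-east-1a', 'us-east-1b'}
desired_zones = {'us-east-1b', 'us-east-1c'}

to_enable = desired_zones - current_zones    # applied first
to_disable = current_zones - desired_zones   # applied second, only when purging

print(sorted(to_enable), sorted(to_disable))  # ['us-east-1c'] ['us-east-1a']
```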
+
+    def _set_security_groups(self):
+        if not self.security_group_ids:
+            return False
+        # Security Group Names should already be converted to IDs by this point.
+        if set(self.elb['SecurityGroups']) == set(self.security_group_ids):
+            return False
+
+        self.changed = True
+
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.apply_security_groups_to_load_balancer(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                SecurityGroups=self.security_group_ids,
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to apply security groups to load balancer")
+        return True
+
+    def _set_health_check(self):
+        """Set health check values on ELB as needed"""
+        if not self.health_check:
+            return False
+
+        health_check_config = self._format_healthcheck()
+
+        if health_check_config == self.elb['HealthCheck']:
+            return False
+
+        self.changed = True
+        if self.check_mode:
+            return True
+        try:
+            self.client.configure_health_check(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                HealthCheck=health_check_config,
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to apply healthcheck to load balancer")
+
+        return True
+
+    def _set_elb_attributes(self):
+        attributes = {}
+        if self.cross_az_load_balancing is not None:
+            attr = dict(Enabled=self.cross_az_load_balancing)
+            if not self.elb_attributes.get('CrossZoneLoadBalancing', None) == attr:
+                attributes['CrossZoneLoadBalancing'] = attr
+
+        if self.idle_timeout is not None:
+            attr = dict(IdleTimeout=self.idle_timeout)
+            if not self.elb_attributes.get('ConnectionSettings', None) == attr:
+                attributes['ConnectionSettings'] = attr
+
+        if self.connection_draining_timeout is not None:
+            curr_attr = dict(self.elb_attributes.get('ConnectionDraining', {}))
+            if self.connection_draining_timeout == 0:
+                attr = dict(Enabled=False)
+                curr_attr.pop('Timeout', None)
+            else:
+                attr = dict(Enabled=True, Timeout=self.connection_draining_timeout)
+            if not curr_attr == attr:
+                attributes['ConnectionDraining'] = attr
+
+        if self.access_logs is not None:
+            curr_attr = dict(self.elb_attributes.get('AccessLog', {}))
+            # For disabling we only need to compare and pass 'Enabled'
+            if not self.access_logs.get('enabled'):
+                curr_attr = dict(Enabled=curr_attr.get('Enabled', False))
+                attr = dict(Enabled=self.access_logs.get('enabled'))
+            else:
+                attr = dict(
+                    Enabled=True,
+                    S3BucketName=self.access_logs['s3_location'],
+                    S3BucketPrefix=self.access_logs.get('s3_prefix', ''),
+                    EmitInterval=self.access_logs.get('interval', 60),
+                )
+            if not curr_attr == attr:
+                attributes['AccessLog'] = attr
+
+        if not attributes:
+            return False
+
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.modify_load_balancer_attributes(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                LoadBalancerAttributes=attributes
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to apply load balancer attributes")
+
+    def _proxy_policy_name(self):
+        return 'ProxyProtocol-policy'
+
+    def _policy_name(self, policy_type):
+        return 'ec2-elb-lb-{0}'.format(policy_type)
+
+    def _get_listener_policies(self):
+        """Get a list of listener policies mapped to the LoadBalancerPort"""
+        if not self.elb:
+            return {}
+        listener_descriptions = self.elb.get('ListenerDescriptions', [])
+        policies = {l['LoadBalancerPort']: l['PolicyNames'] for l in listener_descriptions}
+        return policies
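For reference, a hedged boto3 sketch of the payload `_set_elb_attributes()` assembles for `modify_load_balancer_attributes`. In the module only the attributes that differ from the current state are included; the load balancer name and values here are illustrative, and running it requires AWS credentials.

```python
# Illustrative boto3 call showing the full LoadBalancerAttributes shape.
import boto3

elb = boto3.client('elb', region_name='us-east-1')
elb.modify_load_balancer_attributes(
    LoadBalancerName='my-elb',  # hypothetical name
    LoadBalancerAttributes={
        'CrossZoneLoadBalancing': {'Enabled': True},
        'ConnectionSettings': {'IdleTimeout': 300},
        # connection_draining_timeout=0 maps to Enabled=False with no Timeout key
        'ConnectionDraining': {'Enabled': True, 'Timeout': 60},
        'AccessLog': {
            'Enabled': True,
            'S3BucketName': 'my-bucket',
            'S3BucketPrefix': 'logs',
            'EmitInterval': 60,
        },
    },
)
```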
+
+    def _set_listener_policies(self, port, policies):
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.set_load_balancer_policies_of_listener(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                LoadBalancerPort=port,
+                PolicyNames=list(policies),
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to set load balancer listener policies",
+                                      port=port, policies=policies)
+
+        return True
+
+    def _get_stickiness_policies(self):
+        """Get a list of AppCookieStickinessPolicyType and LBCookieStickinessPolicyType policies"""
+        return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] in ['AppCookieStickinessPolicyType', 'LBCookieStickinessPolicyType'])
+
+    def _get_app_stickiness_policy_map(self):
+        """Get a mapping of App Cookie Stickiness policy names to their definitions"""
+        policies = self.elb.get('Policies', {}).get('AppCookieStickinessPolicies', [])
+        return {p['PolicyName']: p for p in policies}
+
+    def _get_lb_stickiness_policy_map(self):
+        """Get a mapping of LB Cookie Stickiness policy names to their definitions"""
+        policies = self.elb.get('Policies', {}).get('LBCookieStickinessPolicies', [])
+        return {p['PolicyName']: p for p in policies}
+
+    def _purge_stickiness_policies(self):
+        """Removes all stickiness policies from all listeners"""
+        # Used when purging stickiness policies or updating a policy (you can't
+        # update a policy while it's connected to a Listener)
+        stickiness_policies = set(self._get_stickiness_policies())
+        listeners = self.elb['ListenerDescriptions']
+        changed = False
+        for listener in listeners:
+            port = listener['Listener']['LoadBalancerPort']
+            policies = set(listener['PolicyNames'])
+            new_policies = set(policies - stickiness_policies)
+            if policies != new_policies:
+                changed |= self._set_listener_policies(port, new_policies)
+
+        return changed
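The dispatch that follows chooses between the two Classic ELB stickiness APIs. As a standalone, hedged sketch (illustrative names; requires boto3 and AWS credentials): `type=loadbalancer` creates a duration-based cookie owned by the ELB, while `type=application` follows the lifetime of an existing application cookie.

```python
# Illustrative boto3 calls for the two stickiness policy types.
import boto3

elb = boto3.client('elb', region_name='us-east-1')

# type=loadbalancer: ELB issues its own cookie; expiration of 0 means
# the stickiness lasts for the browser session
elb.create_lb_cookie_stickiness_policy(
    LoadBalancerName='my-elb',  # hypothetical name
    PolicyName='ec2-elb-lb-LBCookieStickinessPolicyType',
    CookieExpirationPeriod=300,
)

# type=application: stickiness tracks an application-provided cookie
elb.create_app_cookie_stickiness_policy(
    LoadBalancerName='my-elb',
    PolicyName='ec2-elb-lb-AppCookieStickinessPolicyType',
    CookieName='SESSIONID',
)
```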
+
+    def _set_stickiness_policies(self):
+        if self.stickiness is None:
+            return False
+
+        # Make sure that the list of policies and listeners is up to date, we're
+        # going to make changes to all listeners
+        self._update_descriptions()
+
+        if not self.stickiness['enabled']:
+            return self._purge_stickiness_policies()
+
+        if self.stickiness['type'] == 'loadbalancer':
+            policy_name = self._policy_name('LBCookieStickinessPolicyType')
+            expiration = self.stickiness.get('expiration')
+            if not expiration:
+                expiration = 0
+            policy_description = dict(
+                PolicyName=policy_name,
+                CookieExpirationPeriod=expiration,
+            )
+            existing_policies = self._get_lb_stickiness_policy_map()
+            add_method = self.client.create_lb_cookie_stickiness_policy
+        elif self.stickiness['type'] == 'application':
+            policy_name = self._policy_name('AppCookieStickinessPolicyType')
+            policy_description = dict(
+                PolicyName=policy_name,
+                CookieName=self.stickiness.get('cookie', 0)
+            )
+            existing_policies = self._get_app_stickiness_policy_map()
+            add_method = self.client.create_app_cookie_stickiness_policy
+        else:
+            # We shouldn't get here...
+            self.module.fail_json(
+                msg='Unknown stickiness policy {0}'.format(
+                    self.stickiness['type']
+                )
+            )
+
+        changed = False
+        # To update a policy we need to delete then re-add, and we can only
+        # delete if the policy isn't attached to a listener
+        if policy_name in existing_policies:
+            if existing_policies[policy_name] != policy_description:
+                changed |= self._purge_stickiness_policies()
+
+        if changed:
+            self._update_descriptions()
+
+        changed |= self._set_stickiness_policy(
+            method=add_method,
+            description=policy_description,
+            existing_policies=existing_policies,
+        )
+
+        listeners = self.elb['ListenerDescriptions']
+        for listener in listeners:
+            changed |= self._set_lb_stickiness_policy(
+                listener=listener,
+                policy=policy_name
+            )
+        return changed
+
+    def _delete_loadbalancer_policy(self, policy_name):
+        self.changed = True
+        if self.check_mode:
+            return True
+
+        try:
+            self.client.delete_load_balancer_policy(
+                LoadBalancerName=self.name,
+                PolicyName=policy_name,
+            )
+        except is_boto3_error_code('InvalidConfigurationRequest'):
+            # Already deleted
+            return False
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+            self.module.fail_json_aws(e, msg="Failed to delete load balancer policy {0}".format(policy_name))
+        return True
+
+    def _set_stickiness_policy(self, method, description, existing_policies=None):
+        changed = False
+        if existing_policies:
+            policy_name = description['PolicyName']
+            if policy_name in existing_policies:
+                if existing_policies[policy_name] == description:
+                    return False
+                if existing_policies[policy_name] != description:
+                    changed |= self._delete_loadbalancer_policy(policy_name)
+
+        self.changed = True
+        changed = True
+
+        if self.check_mode:
+            return changed
+
+        # This needs to be in place for comparisons, but not passed to the
+        # method.
+        if not description.get('CookieExpirationPeriod', None):
+            description.pop('CookieExpirationPeriod', None)
+
+        try:
+            method(
+                aws_retry=True,
+                LoadBalancerName=self.name,
+                **description
+            )
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to create load balancer stickiness policy",
+                                      description=description)
+        return changed
+
+    def _set_lb_stickiness_policy(self, listener, policy):
+        port = listener['Listener']['LoadBalancerPort']
+        stickiness_policies = set(self._get_stickiness_policies())
+        changed = False
+
+        policies = set(listener['PolicyNames'])
+        new_policies = list(policies - stickiness_policies)
+        new_policies.append(policy)
+
+        if policies != set(new_policies):
+            changed |= self._set_listener_policies(port, new_policies)
+
+        return changed
+
+    def _get_backend_policies(self):
+        """Get a list of backend policies mapped to the InstancePort"""
+        if not self.elb:
+            return {}
+        server_descriptions = self.elb.get('BackendServerDescriptions', [])
+        policies = {b['InstancePort']: b['PolicyNames'] for b in server_descriptions}
+        return policies
+
+    def _get_proxy_protocol_policy(self):
+        """Returns the name of the ProxyProtocol policy if one has been created"""
+        all_proxy_policies = self._get_proxy_policies()
+        if not all_proxy_policies:
+            return None
+        if len(all_proxy_policies) == 1:
+            return all_proxy_policies[0]
+        return all_proxy_policies
+
+    def _get_proxy_policies(self):
+        """Get a list of ProxyProtocolPolicyType policies"""
+        return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] == 'ProxyProtocolPolicyType')
+
+    def _get_policy_map(self):
+        """Get a mapping of Policy names to their definitions"""
+        return {p['PolicyName']: p for p in self.elb_policies}
+
+    def _set_backend_policies(self):
+        """Sets policies for all backends"""
+        # Currently only supports setting ProxyProtocol policies
+        if not self.listeners:
+            return False
+
+        backend_policies = self._get_backend_policies()
+        proxy_policies = set(self._get_proxy_policies())
+
+        proxy_ports = dict()
+        for listener in self.listeners:
+            proxy_protocol = listener.get('proxy_protocol', None)
+            # Only look at the listeners for which proxy_protocol is defined
+            if proxy_protocol is None:
+                continue
+            instance_port = listener.get('instance_port')
+            if proxy_ports.get(instance_port, None) is not None:
+                if proxy_ports[instance_port] != proxy_protocol:
+                    self.module.fail_json(
+                        msg='proxy_protocol set to conflicting values for listeners'
+                            ' on port {0}'.format(instance_port))
+            proxy_ports[instance_port] = proxy_protocol
+
+        if not proxy_ports:
+            return False
+
+        changed = False
+
+        # If anyone's set proxy_protocol to true, make sure we have our policy
+        # in place.
+ proxy_policy_name = self._proxy_policy_name() + if any(proxy_ports.values()): + changed |= self._set_proxy_protocol_policy(proxy_policy_name) + + for port in proxy_ports: + current_policies = set(backend_policies.get(port, [])) + new_policies = list(current_policies - proxy_policies) + if proxy_ports[port]: + new_policies.append(proxy_policy_name) + + changed |= self._set_backend_policy(port, new_policies) + + return changed + + def _set_backend_policy(self, port, policies): + backend_policies = self._get_backend_policies() + current_policies = set(backend_policies.get(port, [])) + + if current_policies == set(policies): + return False + + self.changed = True + + if self.check_mode: + return True + + try: + self.client.set_load_balancer_policies_for_backend_server( + aws_retry=True, + LoadBalancerName=self.name, + InstancePort=port, + PolicyNames=policies, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to set load balancer backend policies", + port=port, policies=policies) + + return True + + def _set_proxy_protocol_policy(self, policy_name): + """Install a proxy protocol policy if needed""" + policy_map = self._get_policy_map() + + policy_attributes = [dict(AttributeName='ProxyProtocol', AttributeValue='true')] + + proxy_policy = dict( + PolicyName=policy_name, + PolicyTypeName='ProxyProtocolPolicyType', + PolicyAttributeDescriptions=policy_attributes, + ) + + existing_policy = policy_map.get(policy_name) + if proxy_policy == existing_policy: + return False + + if existing_policy is not None: + self.module.fail_json( + msg="Unable to configure ProxyProtocol policy. " + "Policy with name {0} already exists and doesn't match.".format(policy_name), + policy=proxy_policy, existing_policy=existing_policy, + ) + + proxy_policy['PolicyAttributes'] = proxy_policy.pop('PolicyAttributeDescriptions') + proxy_policy['LoadBalancerName'] = self.name + self.changed = True + + if self.check_mode: + return True + + try: + self.client.create_load_balancer_policy( + aws_retry=True, + **proxy_policy + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to create load balancer policy", policy=proxy_policy) + + return True + + def _get_instance_ids(self): + """Get the current list of instance ids installed in the elb""" + elb = self.elb or {} + return list(i['InstanceId'] for i in elb.get('Instances', [])) + + def _change_instances(self, method, instances): + if not instances: + return False + + self.changed = True + if self.check_mode: + return True + + instance_id_list = list({'InstanceId': i} for i in instances) + try: + method( + aws_retry=True, + LoadBalancerName=self.name, + Instances=instance_id_list, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + self.module.fail_json_aws(e, msg="Failed to change instance registration", + instances=instance_id_list, name=self.name) + return True + + def _set_instance_ids(self): + """Register or deregister instances from an lb instance""" + new_instances = self.instance_ids or [] + existing_instances = self._get_instance_ids() + + instances_to_add = set(new_instances) - set(existing_instances) + if self.purge_instance_ids: + instances_to_remove = set(existing_instances) - set(new_instances) + else: + instances_to_remove = [] + + changed = False + + changed |= self._change_instances(self.client.register_instances_with_load_balancer, + instances_to_add) + if self.wait: 
+            self._wait_for_instance_state('instance_in_service', list(instances_to_add))
+        changed |= self._change_instances(self.client.deregister_instances_from_load_balancer,
+                                          instances_to_remove)
+        if self.wait:
+            self._wait_for_instance_state('instance_deregistered', list(instances_to_remove))
+
+        return changed
+
+    def _get_tags(self):
+        tags = self.client.describe_tags(aws_retry=True,
+                                         LoadBalancerNames=[self.name])
+        if not tags:
+            return {}
+        try:
+            tags = tags['TagDescriptions'][0]['Tags']
+        except (KeyError, TypeError):
+            return {}
+        return boto3_tag_list_to_ansible_dict(tags)
+
+    def _add_tags(self, tags_to_set):
+        if not tags_to_set:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+        tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_set)
+        self.client.add_tags(LoadBalancerNames=[self.name], Tags=tags_to_add)
+        return True
+
+    def _remove_tags(self, tags_to_unset):
+        if not tags_to_unset:
+            return False
+        self.changed = True
+        if self.check_mode:
+            return True
+        tags_to_remove = [dict(Key=tagkey) for tagkey in tags_to_unset]
+        self.client.remove_tags(LoadBalancerNames=[self.name], Tags=tags_to_remove)
+        return True
+
+    def _set_tags(self):
+        """Add/Delete tags"""
+        if self.tags is None:
+            return False
+
+        try:
+            current_tags = self._get_tags()
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to get load balancer tags")
+
+        tags_to_set, tags_to_unset = compare_aws_tags(current_tags, self.tags,
+                                                      self.purge_tags)
+
+        changed = False
+        try:
+            changed |= self._remove_tags(tags_to_unset)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to remove load balancer tags")
+        try:
+            changed |= self._add_tags(tags_to_set)
+        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+            self.module.fail_json_aws(e, msg="Failed to add load balancer tags")
+
+        return changed
+
+    def _validate_stickiness(self, stickiness):
+        problem_found = False
+        if not stickiness:
+            return problem_found
+        if not stickiness['enabled']:
+            return problem_found
+        if stickiness['type'] == 'application':
+            if not stickiness.get('cookie'):
+                problem_found = True
+                self.module.fail_json(
+                    msg='cookie must be specified when stickiness type is "application"',
+                    stickiness=stickiness,
+                )
+            if stickiness.get('expiration'):
+                self.module.warn('expiration is ignored when stickiness type is "application"')
+        if stickiness['type'] == 'loadbalancer':
+            if stickiness.get('cookie'):
+                self.module.warn('cookie is ignored when stickiness type is "loadbalancer"')
+        return problem_found
+
+    def _validate_access_logs(self, access_logs):
+        problem_found = False
+        if not access_logs:
+            return problem_found
+        if not access_logs['enabled']:
+            return problem_found
+        if not access_logs.get('s3_location', None):
+            problem_found = True
+            self.module.fail_json(
+                msg='s3_location must be provided when access_logs is enabled')
+        return problem_found
+
+    def _validate_creation_requirements(self):
+        if self.elb:
+            return False
+        problem_found = False
+        if not self.subnets and not self.zones:
+            problem_found = True
+            self.module.fail_json(
+                msg='One of subnets or zones must be provided when creating an ELB')
+        if not self.listeners:
+            problem_found = True
+            self.module.fail_json(
+                msg='listeners must be provided when creating an ELB')
+        return problem_found
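`_set_tags()` delegates the diff to `compare_aws_tags()`, which returns the tags that need setting and the keys that need unsetting. A standalone illustration of that contract (a simplified reimplementation for clarity, not the collection helper itself):

```python
# Runnable sketch of the compare_aws_tags() contract used by _set_tags().
def compare_tags(current, desired, purge=True):
    to_set = {k: v for k, v in desired.items() if current.get(k) != v}
    to_unset = [k for k in current if k not in desired] if purge else []
    return to_set, to_unset


current = {'Name': 'old-elb', 'stack': 'production'}
desired = {'Name': 'new-elb'}
print(compare_tags(current, desired))  # -> ({'Name': 'new-elb'}, ['stack'])
```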
+
+    def _validate_listeners(self, listeners):
+        if not listeners:
+            return False
+        return any(self._validate_listener(listener) for listener in listeners)
+
+    def _validate_listener(self, listener):
+        problem_found = False
+        if not listener:
+            return problem_found
+        for protocol in ['instance_protocol', 'protocol']:
+            value = listener.get(protocol, None)
+            problem = self._validate_protocol(value)
+            problem_found |= problem
+            if problem:
+                self.module.fail_json(
+                    msg='Invalid protocol ({0}) in listener'.format(value),
+                    listener=listener)
+        return problem_found
+
+    def _validate_health_check(self, health_check):
+        if not health_check:
+            return False
+        protocol = health_check['ping_protocol']
+        if self._validate_protocol(protocol):
+            self.module.fail_json(
+                msg='Invalid protocol ({0}) defined in health check'.format(protocol),
+                health_check=health_check)
+        if protocol.upper() in ['HTTP', 'HTTPS']:
+            if not health_check['ping_path']:
+                self.module.fail_json(
+                    msg='For HTTP and HTTPS health checks a ping_path must be provided',
+                    health_check=health_check)
+        return False
+
+    def _validate_protocol(self, protocol):
+        if not protocol:
+            return False
+        return protocol.upper() not in ['HTTP', 'HTTPS', 'TCP', 'SSL']
+
+    @AWSRetry.jittered_backoff()
+    def _describe_loadbalancer(self, lb_name):
+        paginator = self.client.get_paginator('describe_load_balancers')
+        return paginator.paginate(LoadBalancerNames=[lb_name]).build_full_result()['LoadBalancerDescriptions']
+
+    def _get_vpc_from_subnets(self, subnets):
+        if not subnets:
+            return None
+
+        subnet_details = self._describe_subnets(list(subnets))
+        vpc_ids = set(subnet['VpcId'] for subnet in subnet_details)
+
+        if not vpc_ids:
+            return None
+        if len(vpc_ids) > 1:
+            self.module.fail_json(msg="Subnets for an ELB may not span multiple VPCs",
+                                  subnets=subnet_details, vpc_ids=list(vpc_ids))
+        return vpc_ids.pop()
+
+    @AWSRetry.jittered_backoff()
+    def _describe_subnets(self, subnet_ids):
+        paginator = self.ec2_client.get_paginator('describe_subnets')
+        return paginator.paginate(SubnetIds=subnet_ids).build_full_result()['Subnets']
+
+    # Wrap it so we get the backoff
+    @AWSRetry.jittered_backoff()
+    def _get_ec2_security_group_ids_from_names(self, **params):
+        return get_ec2_security_group_ids_from_names(ec2_connection=self.ec2_client, **params)
+
+
+def main():
+
+    access_log_spec = dict(
+        enabled=dict(required=False, type='bool', default=True),
+        s3_location=dict(required=False, type='str'),
+        s3_prefix=dict(required=False, type='str', default=""),
+        interval=dict(required=False, type='int', default=60, choices=[5, 60]),
+    )
+
+    stickiness_spec = dict(
+        type=dict(required=False, type='str', choices=['application', 'loadbalancer']),
+        enabled=dict(required=False, type='bool', default=True),
+        cookie=dict(required=False, type='str'),
+        expiration=dict(required=False, type='int')
+    )
+
+    healthcheck_spec = dict(
+        ping_protocol=dict(required=True, type='str'),
+        ping_path=dict(required=False, type='str'),
+        ping_port=dict(required=True, type='int'),
+        interval=dict(required=True, type='int'),
+        timeout=dict(aliases=['response_timeout'], required=True, type='int'),
+        unhealthy_threshold=dict(required=True, type='int'),
+        healthy_threshold=dict(required=True, type='int'),
+    )
+
+    listeners_spec = dict(
+        load_balancer_port=dict(required=True, type='int'),
+        instance_port=dict(required=True, type='int'),
+        ssl_certificate_id=dict(required=False, type='str'),
+        protocol=dict(required=True, type='str'),
+        instance_protocol=dict(required=False, type='str'),
+        proxy_protocol=dict(required=False, type='bool'),
+    )
+
+    argument_spec = dict(
state=dict(required=True, choices=['present', 'absent']), + name=dict(required=True), + listeners=dict(type='list', elements='dict', options=listeners_spec), + purge_listeners=dict(default=True, type='bool'), + instance_ids=dict(type='list', elements='str'), + purge_instance_ids=dict(default=False, type='bool'), + zones=dict(type='list', elements='str'), + purge_zones=dict(default=False, type='bool'), + security_group_ids=dict(type='list', elements='str'), + security_group_names=dict(type='list', elements='str'), + health_check=dict(type='dict', options=healthcheck_spec), + subnets=dict(type='list', elements='str'), + purge_subnets=dict(default=False, type='bool'), + scheme=dict(choices=['internal', 'internet-facing']), + connection_draining_timeout=dict(type='int'), + idle_timeout=dict(type='int'), + cross_az_load_balancing=dict(type='bool'), + stickiness=dict(type='dict', options=stickiness_spec), + access_logs=dict(type='dict', options=access_log_spec), + wait=dict(default=False, type='bool'), + wait_timeout=dict(default=180, type='int'), + tags=dict(type='dict'), + purge_tags=dict(default=True, type='bool'), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + mutually_exclusive=[ + ['security_group_ids', 'security_group_names'], + ['zones', 'subnets'], + ], + supports_check_mode=True, + ) + + wait_timeout = module.params['wait_timeout'] + state = module.params['state'] + + if wait_timeout > 600: + module.fail_json(msg='wait_timeout maximum is 600 seconds') + + elb_man = ElbManager(module) + elb_man.validate_params(state) + + if state == 'present': + elb_man.ensure_ok() + # original boto style + elb = elb_man.get_info() + # boto3 style + lb = elb_man.get_load_balancer() + ec2_result = dict(elb=elb, load_balancer=lb) + elif state == 'absent': + elb_man.ensure_gone() + # original boto style + elb = elb_man.get_info() + ec2_result = dict(elb=elb) + + ansible_facts = {'ec2_elb': 'info'} + + module.exit_json( + ansible_facts=ansible_facts, + changed=elb_man.changed, + **ec2_result, + ) + + +if __name__ == '__main__': + main() diff --git a/tests/integration/targets/ec2_elb_lb/aliases b/tests/integration/targets/ec2_elb_lb/aliases deleted file mode 100644 index 4ef4b2067d0..00000000000 --- a/tests/integration/targets/ec2_elb_lb/aliases +++ /dev/null @@ -1 +0,0 @@ -cloud/aws diff --git a/tests/integration/targets/ec2_elb_lb/defaults/main.yml b/tests/integration/targets/ec2_elb_lb/defaults/main.yml deleted file mode 100644 index a85ab79ac2c..00000000000 --- a/tests/integration/targets/ec2_elb_lb/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# defaults file for ec2_elb_lb -elb_name: 'ansible-test-{{ tiny_prefix }}' diff --git a/tests/integration/targets/ec2_elb_lb/tasks/main.yml b/tests/integration/targets/ec2_elb_lb/tasks/main.yml deleted file mode 100644 index f0aa2bdaa55..00000000000 --- a/tests/integration/targets/ec2_elb_lb/tasks/main.yml +++ /dev/null @@ -1,334 +0,0 @@ ---- -# __Test Info__ -# Create a self signed cert and upload it to AWS -# http://www.akadia.com/services/ssh_test_certificate.html -# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html - -# __Test Outline__ -# -# __ec2_elb_lb__ -# create test elb with listeners and certificate -# change AZ's -# change listeners -# remove listeners -# remove elb - -# __ec2-common__ -# test environment variable EC2_REGION -# test with no parameters -# test with only instance_id -# test invalid region parameter -# test valid region parameter -# test invalid ec2_url parameter -# 
test valid ec2_url parameter -# test credentials from environment -# test credential parameters - -- module_defaults: - group/aws: - region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - block: - - # ============================================================ - # create test elb with listeners, certificate, and health check - - - name: Create ELB - ec2_elb_lb: - name: "{{ elb_name }}" - state: present - zones: - - "{{ aws_region }}a" - - "{{ aws_region }}b" - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - - protocol: http - load_balancer_port: 8080 - instance_port: 8080 - health_check: - ping_protocol: http - ping_port: 80 - ping_path: "/index.html" - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 10 - register: info - - - assert: - that: - - 'info.changed' - - 'info.elb.status == "created"' - - '"{{ aws_region }}a" in info.elb.zones' - - '"{{ aws_region }}b" in info.elb.zones' - - 'info.elb.health_check.healthy_threshold == 10' - - 'info.elb.health_check.interval == 30' - - 'info.elb.health_check.target == "HTTP:80/index.html"' - - 'info.elb.health_check.timeout == 5' - - 'info.elb.health_check.unhealthy_threshold == 2' - - '[80, 80, "HTTP", "HTTP"] in info.elb.listeners' - - '[8080, 8080, "HTTP", "HTTP"] in info.elb.listeners' - - # ============================================================ - - # check ports, would be cool, but we are at the mercy of AWS - # to start things in a timely manner - - #- name: check to make sure 80 is listening - # wait_for: host={{ info.elb.dns_name }} port=80 timeout=600 - # register: result - - #- name: assert can connect to port# - # assert: 'result.state == "started"' - - #- name: check to make sure 443 is listening - # wait_for: host={{ info.elb.dns_name }} port=443 timeout=600 - # register: result - - #- name: assert can connect to port# - # assert: 'result.state == "started"' - - # ============================================================ - - # Change AZ's - - - name: Change AZ's - ec2_elb_lb: - name: "{{ elb_name }}" - state: present - zones: - - "{{ aws_region }}c" - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_zones: yes - health_check: - ping_protocol: http - ping_port: 80 - ping_path: "/index.html" - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 10 - register: info - - - - - assert: - that: - - 'info.elb.status == "ok"' - - 'info.changed' - - 'info.elb.zones[0] == "{{ aws_region }}c"' - - # ============================================================ - - # Update AZ's - - - name: Update AZ's - ec2_elb_lb: - name: "{{ elb_name }}" - state: present - zones: - - "{{ aws_region }}a" - - "{{ aws_region }}b" - - "{{ aws_region }}c" - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - purge_zones: yes - register: info - - - assert: - that: - - 'info.changed' - - 'info.elb.status == "ok"' - - '"{{ aws_region }}a" in info.elb.zones' - - '"{{ aws_region }}b" in info.elb.zones' - - '"{{ aws_region }}c" in info.elb.zones' - - - # ============================================================ - - # Purge Listeners - - - name: Purge Listeners - ec2_elb_lb: - name: "{{ elb_name }}" - state: present - zones: - - "{{ aws_region }}a" - - "{{ aws_region }}b" - - "{{ aws_region }}c" - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 81 - purge_listeners: yes - 
register: info - - - assert: - that: - - 'info.elb.status == "ok"' - - 'info.changed' - - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners' - - 'info.elb.listeners|length == 1' - - - - # ============================================================ - - # add Listeners - - - name: Add Listeners - ec2_elb_lb: - name: "{{ elb_name }}" - state: present - zones: - - "{{ aws_region }}a" - - "{{ aws_region }}b" - - "{{ aws_region }}c" - listeners: - - protocol: http - load_balancer_port: 8081 - instance_port: 8081 - purge_listeners: no - register: info - - - assert: - that: - - 'info.elb.status == "ok"' - - 'info.changed' - - '[80, 81, "HTTP", "HTTP"] in info.elb.listeners' - - '[8081, 8081, "HTTP", "HTTP"] in info.elb.listeners' - - 'info.elb.listeners|length == 2' - - - # ============================================================ - - - name: test with no name - ec2_elb_lb: - state: present - register: result - ignore_errors: true - - - name: assert failure when called with no parameters - assert: - that: - - 'result.failed' - - 'result.msg == "missing required arguments: name"' - - - # ============================================================ - - name: test with only name (state missing) - ec2_elb_lb: - name: "{{ elb_name }}" - register: result - ignore_errors: true - - - name: assert failure when called with only name - assert: - that: - - 'result.failed' - - 'result.msg == "missing required arguments: state"' - - - # ============================================================ - - name: test invalid region parameter - ec2_elb_lb: - name: "{{ elb_name }}" - state: present - region: 'asdf querty 1234' - zones: - - "{{ aws_region }}a" - - "{{ aws_region }}b" - - "{{ aws_region }}c" - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - register: result - ignore_errors: true - - - name: assert invalid region parameter - assert: - that: - - 'result.failed' - - '"Region asdf querty 1234 does not seem to be available" in result.msg' - - - # ============================================================ - - name: test no authentication parameters - ec2_elb_lb: - name: "{{ elb_name }}" - state: present - aws_access_key: '{{ omit }}' - aws_secret_key: '{{ omit }}' - security_token: '{{ omit }}' - zones: - - "{{ aws_region }}a" - - "{{ aws_region }}b" - - "{{ aws_region }}c" - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - register: result - ignore_errors: true - - - name: assert valid region parameter - assert: - that: - - 'result.failed' - - '"No handler was ready to authenticate" in result.msg' - - - # ============================================================ - - name: test credentials from environment - ec2_elb_lb: - name: "{{ elb_name }}" - state: present - aws_access_key: "{{ omit }}" - aws_secret_key: "{{ omit }}" - security_token: "{{ omit }}" - zones: - - "{{ aws_region }}a" - - "{{ aws_region }}b" - - "{{ aws_region }}c" - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 81 - environment: - EC2_ACCESS_KEY: bogus_access_key - EC2_SECRET_KEY: bogus_secret_key - register: result - ignore_errors: true - - - name: assert credentials from environment - assert: - that: - - 'result.failed' - - '"InvalidClientTokenId" in result.exception' - - - always: - - # ============================================================ - - name: remove the test load balancer completely - ec2_elb_lb: - name: "{{ elb_name }}" - state: absent - register: result - - - name: assert the load balancer was removed - assert: - that: - - 
'result.changed' - - 'result.elb.name == "{{ elb_name }}"' - - 'result.elb.status == "deleted"' diff --git a/tests/integration/targets/elb_classic_lb/aliases b/tests/integration/targets/elb_classic_lb/aliases new file mode 100644 index 00000000000..8e0974e45eb --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/aliases @@ -0,0 +1,4 @@ +# 20+ minutes +slow + +cloud/aws diff --git a/tests/integration/targets/elb_classic_lb/defaults/main.yml b/tests/integration/targets/elb_classic_lb/defaults/main.yml new file mode 100644 index 00000000000..3f6c387839b --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/defaults/main.yml @@ -0,0 +1,163 @@ +--- +# defaults file for ec2_elb_lb +elb_name: 'ansible-test-{{ tiny_prefix }}' + +vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' +subnet_cidr_1: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' +subnet_cidr_2: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24' +subnet_cidr_3: '10.{{ 256 | random(seed=resource_prefix) }}.3.0/24' +subnet_cidr_4: '10.{{ 256 | random(seed=resource_prefix) }}.4.0/24' + +default_tags: + snake_case_key: snake_case_value + camelCaseKey: camelCaseValue + PascalCaseKey: PascalCaseValue + "key with spaces": value with spaces + "Upper With Spaces": Upper With Spaces + +partial_tags: + snake_case_key: snake_case_value + camelCaseKey: camelCaseValue + +updated_tags: + updated_snake_case_key: updated_snake_case_value + updatedCamelCaseKey: updatedCamelCaseValue + UpdatedPascalCaseKey: UpdatedPascalCaseValue + "updated key with spaces": updated value with spaces + "updated Upper With Spaces": Updated Upper With Spaces + +default_listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + - protocol: http + load_balancer_port: 8080 + instance_port: 8080 + instance_protocol: http +default_listener_tuples: + - [80, 80, "HTTP", "HTTP"] + - [8080, 8080, "HTTP", "HTTP"] + +purged_listeners: + - protocol: http + load_balancer_port: 8080 + instance_port: 8080 + instance_protocol: http +purged_listener_tuples: + - [8080, 8080, "HTTP", "HTTP"] + +updated_listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 8181 + - protocol: http + load_balancer_port: 8080 + instance_port: 8080 + instance_protocol: http +updated_listener_tuples: + - [80, 8181, "HTTP", "HTTP"] + - [8080, 8080, "HTTP", "HTTP"] + +unproxied_listener: + - protocol: http + load_balancer_port: 80 + instance_port: 8181 + proxy_protocol: False +unproxied_listener_tuples: + - [80, 8181, "HTTP", "HTTP"] + +proxied_listener: + - protocol: http + load_balancer_port: 80 + instance_port: 8181 + proxy_protocol: True +proxied_listener_tuples: + - [80, 8181, "HTTP", "HTTP"] + +ssh_listeners: + - protocol: tcp + load_balancer_port: 22 + instance_port: 22 + instance_protocol: tcp +ssh_listener_tuples: + - [22, 22, "TCP", "TCP"] + +default_health_check: + ping_protocol: http + ping_port: 80 + ping_path: "/index.html" + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 10 +default_health_check_target: "HTTP:80/index.html" + +updated_health_check: + ping_protocol: http + ping_port: 8181 + ping_path: "/healthz" + response_timeout: 15 + interval: 42 + unhealthy_threshold: 7 + healthy_threshold: 6 +updated_health_check_target: "HTTP:8181/healthz" + +nonhttp_health_check: + ping_protocol: tcp + ping_port: 8282 + response_timeout: 16 + interval: 43 + unhealthy_threshold: 8 + healthy_threshold: 2 +nonhttp_health_check_target: "TCP:8282" + +ssh_health_check: + ping_protocol: tcp + ping_port: 22 + 
  response_timeout: 5
+  interval: 10
+  unhealthy_threshold: 2
+  healthy_threshold: 2
+ssh_health_check_target: "TCP:22"
+
+default_idle_timeout: 25
+updated_idle_timeout: 50
+default_drain_timeout: 15
+updated_drain_timeout: 25
+
+app_stickiness:
+  type: application
+  cookie: MyCookie
+  enabled: true
+
+updated_app_stickiness:
+  type: application
+  cookie: AnotherCookie
+
+lb_stickiness:
+  type: loadbalancer
+
+updated_lb_stickiness:
+  type: loadbalancer
+  expiration: 600
+
+# Amazon's SDKs don't provide the list of account IDs; Amazon only provides
+# a web page.  If you want to run the tests outside the US regions you'll
+# need to update this.
+# https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html
+access_log_account_id_map:
+  us-east-1: '127311923021'
+  us-east-2: '033677994240'
+  us-west-1: '027434742980'
+  us-west-2: '797873946194'
+  us-gov-west-1: '048591011584'
+  us-gov-east-1: '190560391635'
+
+access_log_account_id: '{{ access_log_account_id_map[aws_region] }}'
+
+s3_logging_bucket_a: 'ansible-test-{{ tiny_prefix }}-a'
+s3_logging_bucket_b: 'ansible-test-{{ tiny_prefix }}-b'
+default_logging_prefix: 'logs'
+updated_logging_prefix: 'mylogs'
+default_logging_interval: 5
+updated_logging_interval: 60
diff --git a/tests/integration/targets/ec2_elb_lb/meta/main.yml b/tests/integration/targets/elb_classic_lb/meta/main.yml
similarity index 100%
rename from tests/integration/targets/ec2_elb_lb/meta/main.yml
rename to tests/integration/targets/elb_classic_lb/meta/main.yml
diff --git a/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml b/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml
new file mode 100644
index 00000000000..28207ba6985
--- /dev/null
+++ b/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml
@@ -0,0 +1,292 @@
+---
+- block:
+    # For creation, test some basic behaviour
+    - module_defaults:
+        elb_classic_lb:
+          # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+          listeners: '{{ default_listeners }}'
+          wait: true
+          scheme: 'internal'
+          subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+      block:
+        # ============================================================
+        # create test ELB with listeners
+
+        - name: Create internal ELB (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is changed
+              - result.elb.status == "created"
+
+        - name: Create ELB
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+
+        - assert:
+            that:
+              - result is changed
+              - result.elb.status == "created"
+              - availability_zone_a in result.elb.zones
+              - availability_zone_b in result.elb.zones
+              - subnet_a in result.elb.subnets
+              - subnet_b in result.elb.subnets
+
+        - name: Create internal ELB idempotency (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is not changed
+              - result.elb.status == "exists"
+
+        - name: Create internal ELB idempotency
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+
+        - assert:
+            that:
+              - result is not changed
+              - result.elb.status == "exists"
+              - availability_zone_a in result.elb.zones
+              - availability_zone_b in result.elb.zones
+              - subnet_a in result.elb.subnets
+              - subnet_b in result.elb.subnets
+
+        - ec2_eni_info:
+            filters:
+              description: 'ELB {{ elb_name }}'
+          register: info
+
+        - assert:
+            that:
+              - info.network_interfaces | length > 0
+
+        - elb_classic_lb_info:
+            names: ['{{ elb_name }}']
+          register: info
+
+        - assert:
+            that:
+              - info.elbs | length > 0
+
+    # ============================================================
+    # Now that creation has been tested, drop the creation-time defaults
+    # ============================================================
+
+    - name: Add a subnet - no purge (check_mode)
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        subnets: ['{{ subnet_c }}']
+      register: result
+      check_mode: true
+
+    - assert:
+        that:
+          - result is changed
+          - result.elb.status == "exists"
+
+    - name: Add a subnet - no purge
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        subnets: ['{{ subnet_c }}']
+      register: result
+
+    - assert:
+        that:
+          - result is changed
+          - result.elb.status == "exists"
+          - availability_zone_a in result.elb.zones
+          - availability_zone_b in result.elb.zones
+          - availability_zone_c in result.elb.zones
+          - subnet_a in result.elb.subnets
+          - subnet_b in result.elb.subnets
+          - subnet_c in result.elb.subnets
+
+    - name: Add a subnet - no purge - idempotency (check_mode)
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        subnets: ['{{ subnet_c }}']
+      register: result
+      check_mode: true
+
+    - assert:
+        that:
+          - result is not changed
+          - result.elb.status == "exists"
+
+    - name: Add a subnet - no purge - idempotency
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        subnets: ['{{ subnet_c }}']
+      register: result
+
+    - assert:
+        that:
+          - result is not changed
+          - result.elb.status == "exists"
+          - availability_zone_a in result.elb.zones
+          - availability_zone_b in result.elb.zones
+          - availability_zone_c in result.elb.zones
+          - subnet_a in result.elb.subnets
+          - subnet_b in result.elb.subnets
+          - subnet_c in result.elb.subnets
+
+    # While purging, try adding a subnet from the same AZ as one of the
+    # subnets being purged.  This matters because an ELB can't be attached
+    # to two subnets from the same AZ at the same time.
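+    # In other words, the purge below swaps {subnet_a, subnet_b, subnet_c}
+    # for {subnet_c, subnet_a2}: AZ b drops out entirely, while AZ a stays
+    # attached via its second subnet (subnet_a2).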
+ - name: Add a subnet - purge (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}'] + purge_subnets: true + register: result + check_mode: true + + - assert: + that: + - result is changed + - result.elb.status == "exists" + + - name: Add a subnet - purge + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}'] + purge_subnets: true + register: result + + - assert: + that: + - result is changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b not in result.elb.zones + - availability_zone_c in result.elb.zones + - subnet_a not in result.elb.subnets + - subnet_b not in result.elb.subnets + - subnet_c in result.elb.subnets + - subnet_a2 in result.elb.subnets + + - name: Add a subnet - purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}'] + purge_subnets: true + register: result + check_mode: true + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + + - name: Add a subnet - purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}'] + purge_subnets: true + register: result + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a in result.elb.zones + - availability_zone_b not in result.elb.zones + - availability_zone_c in result.elb.zones + - subnet_a not in result.elb.subnets + - subnet_b not in result.elb.subnets + - subnet_c in result.elb.subnets + - subnet_a2 in result.elb.subnets + + # ============================================================ + + - name: remove the test load balancer completely (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + check_mode: true + + - name: assert the load balancer would be removed + assert: + that: + - result is changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "deleted"' + + - name: remove the test load balancer completely + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + + - name: assert the load balancer was removed + assert: + that: + - result is changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "deleted"' + + - name: remove the test load balancer completely (idempotency) (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + check_mode: true + + - name: assert the load balancer is gone + assert: + that: + - result is not changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "gone"' + + - name: remove the test load balancer completely (idempotency) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + + - name: assert the load balancer is gone + assert: + that: + - result is not changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "gone"' + + always: + + # ============================================================ + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true diff --git a/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml b/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml new file mode 100644 index 00000000000..d76f62be89a --- 
/dev/null
+++ b/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml
@@ -0,0 +1,273 @@
+---
+- block:
+    # For creation, test some basic behaviour
+    - module_defaults:
+        elb_classic_lb:
+          zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+          listeners: '{{ default_listeners }}'
+          wait: true
+          scheme: 'internet-facing'
+          # subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+      block:
+        # ============================================================
+        # create test ELB with listeners
+
+        - name: Create public ELB (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is changed
+              - result.elb.status == "created"
+
+        - name: Create public ELB
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+
+        - assert:
+            that:
+              - result is changed
+              - result.elb.status == "created"
+              - availability_zone_a in result.elb.zones
+              - availability_zone_b in result.elb.zones
+
+        - name: Create public ELB idempotency (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is not changed
+              - result.elb.status == "exists"
+
+        - name: Create public ELB idempotency
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+
+        - assert:
+            that:
+              - result is not changed
+              - result.elb.status == "exists"
+              - availability_zone_a in result.elb.zones
+              - availability_zone_b in result.elb.zones
+
+        - ec2_eni_info:
+            filters:
+              description: 'ELB {{ elb_name }}'
+          register: info
+
+        - assert:
+            that:
+              - info.network_interfaces | length > 0
+
+        - elb_classic_lb_info:
+            names: ['{{ elb_name }}']
+          register: info
+
+        - assert:
+            that:
+              - info.elbs | length > 0
+
+    # ============================================================
+    # Now that creation has been tested, drop the creation-time defaults
+    # ============================================================
+
+    - name: Add a zone - no purge (check_mode)
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        zones: ['{{ availability_zone_c }}']
+      register: result
+      check_mode: true
+
+    - assert:
+        that:
+          - result is changed
+          - result.elb.status == "exists"
+
+    - name: Add a zone - no purge
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        zones: ['{{ availability_zone_c }}']
+      register: result
+
+    - assert:
+        that:
+          - result is changed
+          - result.elb.status == "exists"
+          - availability_zone_a in result.elb.zones
+          - availability_zone_b in result.elb.zones
+          - availability_zone_c in result.elb.zones
+
+    - name: Add a zone - no purge - idempotency (check_mode)
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        zones: ['{{ availability_zone_c }}']
+      register: result
+      check_mode: true
+
+    - assert:
+        that:
+          - result is not changed
+          - result.elb.status == "exists"
+
+    - name: Add a zone - no purge - idempotency
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        zones: ['{{ availability_zone_c }}']
+      register: result
+
+    - assert:
+        that:
+          - result is not changed
+          - result.elb.status == "exists"
+          - availability_zone_a in result.elb.zones
+          - availability_zone_b in result.elb.zones
+          - availability_zone_c in result.elb.zones
+
+    # ============================================================
+
+    - name: Remove a zone - purge (check_mode)
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        zones: ['{{ availability_zone_c }}']
+        purge_zones: true
+      register: result
check_mode: true + + - assert: + that: + - result is changed + - result.elb.status == "exists" + + - name: Remove a zone - purge + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + purge_zones: true + register: result + + - assert: + that: + - result is changed + - result.elb.status == "exists" + - availability_zone_a not in result.elb.zones + - availability_zone_b not in result.elb.zones + - availability_zone_c in result.elb.zones + + - name: Remove a zone - purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + purge_zones: true + register: result + check_mode: true + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + + - name: Remove a zone - purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + zones: ['{{ availability_zone_c }}'] + purge_zones: true + register: result + + - assert: + that: + - result is not changed + - result.elb.status == "exists" + - availability_zone_a not in result.elb.zones + - availability_zone_b not in result.elb.zones + - availability_zone_c in result.elb.zones + + # ============================================================ + + - name: remove the test load balancer completely (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + check_mode: true + + - name: assert the load balancer would be removed + assert: + that: + - result is changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "deleted"' + + - name: remove the test load balancer completely + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + + - name: assert the load balancer was removed + assert: + that: + - result is changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "deleted"' + + - name: remove the test load balancer completely (idempotency) (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + check_mode: true + + - name: assert the load balancer is gone + assert: + that: + - result is not changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "gone"' + + - name: remove the test load balancer completely (idempotency) + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + + - name: assert the load balancer is gone + assert: + that: + - result is not changed + - 'result.elb.name == "{{ elb_name }}"' + - 'result.elb.status == "gone"' + + always: + + # ============================================================ + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true diff --git a/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml b/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml new file mode 100644 index 00000000000..92f25395967 --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml @@ -0,0 +1,9 @@ +--- +- name: Delete instance + ec2_instance: + instance_ids: + - '{{ instance_a }}' + - '{{ instance_b }}' + state: absent + wait: true + ignore_errors: true diff --git a/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml b/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml new file mode 100644 index 00000000000..955f3da6238 --- /dev/null +++ 
b/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml @@ -0,0 +1,32 @@ +--- +- name: Create empty temporary directory + tempfile: + state: directory + register: tmpdir + ignore_errors: true + +- name: Empty S3 buckets before deletion + s3_sync: + bucket: '{{ item }}' + delete: true + file_root: '{{ tmpdir.path }}' + ignore_errors: true + loop: + - '{{ s3_logging_bucket_a }}' + - '{{ s3_logging_bucket_b }}' + +- name: Delete S3 bucket for access logs + s3_bucket: + name: '{{ item }}' + state: absent + register: logging_bucket + ignore_errors: true + loop: + - '{{ s3_logging_bucket_a }}' + - '{{ s3_logging_bucket_b }}' + +- name: Remove temporary directory + file: + state: absent + path: "{{ tmpdir.path }}" + ignore_errors: yes diff --git a/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml b/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml new file mode 100644 index 00000000000..fd7ee965feb --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml @@ -0,0 +1,29 @@ +--- +- name: delete security groups + ec2_group: + name: '{{ item }}' + state: absent + ignore_errors: true + loop: + - '{{ resource_prefix }}-a' + - '{{ resource_prefix }}-b' + - '{{ resource_prefix }}-c' + +- name: delete subnets + ec2_vpc_subnet: + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ item }}' + state: absent + ignore_errors: true + loop: + - '{{ subnet_cidr_1 }}' + - '{{ subnet_cidr_2 }}' + - '{{ subnet_cidr_3 }}' + - '{{ subnet_cidr_4 }}' + +- name: delete VPC + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + state: absent + name: '{{ resource_prefix }}' + ignore_errors: true diff --git a/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml b/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml new file mode 100644 index 00000000000..50679a8c1e2 --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml @@ -0,0 +1,10 @@ +--- +- name: list available AZs + aws_az_info: + register: region_azs + +- name: pick AZs for testing + set_fact: + availability_zone_a: "{{ region_azs.availability_zones[0].zone_name }}" + availability_zone_b: "{{ region_azs.availability_zones[1].zone_name }}" + availability_zone_c: "{{ region_azs.availability_zones[2].zone_name }}" diff --git a/tests/integration/targets/elb_classic_lb/tasks/main.yml b/tests/integration/targets/elb_classic_lb/tasks/main.yml new file mode 100644 index 00000000000..d964c47d15c --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/main.yml @@ -0,0 +1,54 @@ +--- +# __Test Info__ +# Create a self signed cert and upload it to AWS +# http://www.akadia.com/services/ssh_test_certificate.html +# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/ssl-server-cert.html + +# __Test Outline__ +# +# __elb_classic_lb__ +# create test elb with listeners and certificate +# change AZ's +# change listeners +# remove listeners +# remove elb + +- module_defaults: + group/aws: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + collections: + - community.aws + - amazon.aws + block: + + - include_tasks: missing_params.yml + + - include_tasks: describe_region.yml + - include_tasks: setup_vpc.yml + - include_tasks: setup_instances.yml + - include_tasks: setup_s3.yml + + - include_tasks: basic_public.yml + - include_tasks: basic_internal.yml + - include_tasks: schema_change.yml + + - include_tasks: simple_changes.yml + + always: + + 
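+    # This always section runs even if one of the included task files fails,
+    # so the ELB is removed first and its supporting resources (S3 buckets,
+    # instances, VPC) are cleaned up in reverse order of creation.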
# ============================================================ + # ELB should already be gone, but double-check + - name: remove the test load balancer + elb_classic_lb: + name: "{{ elb_name }}" + state: absent + wait: true + register: result + ignore_errors: true + + - include_tasks: cleanup_s3.yml + - include_tasks: cleanup_instances.yml + - include_tasks: cleanup_vpc.yml diff --git a/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml b/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml new file mode 100644 index 00000000000..74779e32c9e --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml @@ -0,0 +1,203 @@ +--- +# Test behaviour when mandatory params aren't passed +- block: + # ============================================================ + + - name: test with no name + elb_classic_lb: + state: present + register: result + ignore_errors: true + + - name: assert failure when called with no parameters + assert: + that: + - 'result.failed' + - '"missing required arguments" in result.msg' + - '"name" in result.msg' + + - name: test with only name (state missing) + elb_classic_lb: + name: "{{ elb_name }}" + register: result + ignore_errors: true + + - name: assert failure when called with only name + assert: + that: + - 'result.failed' + - '"missing required arguments" in result.msg' + - '"state" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: http + register: result + ignore_errors: true + + - name: assert failure when neither subnets nor AZs are provided on creation + assert: + that: + - 'result.failed' + - '"subnets" in result.msg' + - '"zones" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + register: result + ignore_errors: true + + - name: assert failure when listeners not provided on creation + assert: + that: + - 'result.failed' + - '"listeners" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: junk + register: result + ignore_errors: true + + - name: assert failure when listeners contains invalid protocol + assert: + that: + - 'result.failed' + - '"protocol" in result.msg' + - '"junk" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: http + instance_protocol: junk + register: result + ignore_errors: true + + - name: assert failure when listeners contains invalid instance_protocol + assert: + that: + - 'result.failed' + - '"protocol" in result.msg' + - '"junk" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + listeners: + - load_balancer_port: 80 + instance_port: 80 + protocol: http + health_check: + ping_protocol: junk + ping_port: 80 + interval: 5 + timeout: 5 + unhealthy_threshold: 5 + healthy_threshold: 5 + register: result + ignore_errors: true + + - name: assert failure when healthcheck ping_protocol is invalid + assert: + that: + - 'result.failed' + - '"protocol" in result.msg' + - '"junk" in result.msg' + + - elb_classic_lb: + name: "{{ elb_name }}" + state: present + scheme: 'internal' + subnets: ['subnet-123456789'] + 
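+        # The subnet ID above is a dummy value; these tasks are expected to
+        # fail inside the module's own validation, so it is never used to
+        # create anything.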
+        listeners:
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: http
+        health_check:
+          ping_protocol: http
+          ping_port: 80
+          interval: 5
+          timeout: 5
+          unhealthy_threshold: 5
+          healthy_threshold: 5
+      register: result
+      ignore_errors: true
+
+    - name: assert failure when HTTP healthcheck missing a ping_path
+      assert:
+        that:
+          - 'result.failed'
+          - '"ping_path" in result.msg'
+
+    - elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        scheme: 'internal'
+        subnets: ['subnet-123456789']
+        listeners:
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: http
+        stickiness:
+          type: application
+      register: result
+      ignore_errors: true
+
+    - name: assert failure when app stickiness policy missing cookie name
+      assert:
+        that:
+          - 'result.failed'
+          - '"cookie" in result.msg'
+
+    - elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        scheme: 'internal'
+        subnets: ['subnet-123456789']
+        listeners:
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: http
+        access_logs:
+          interval: 60
+      register: result
+      ignore_errors: true
+
+    - name: assert failure when access log is missing a bucket
+      assert:
+        that:
+          - 'result.failed'
+          - '"s3_location" in result.msg'
+
+  always:
+
+    # ============================================================
+    - name: remove the test load balancer
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: absent
+        wait: true
+      register: result
+      ignore_errors: true
diff --git a/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml b/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml
new file mode 100644
index 00000000000..cc667bef28e
--- /dev/null
+++ b/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml
@@ -0,0 +1,189 @@
+---
+- block:
+    # For creation, test some basic behaviour
+    - module_defaults:
+        elb_classic_lb:
+          zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+          listeners: '{{ default_listeners }}'
+          wait: true
+          scheme: 'internet-facing'
+          # subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+      block:
+        - name: Create ELB
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+
+        - assert:
+            that:
+              - result is changed
+              - result.elb.status == 'created'
+              - result.elb.scheme == 'internet-facing'
+
+    - module_defaults:
+        elb_classic_lb:
+          # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+          listeners: '{{ default_listeners }}'
+          wait: true
+          scheme: 'internal'
+          subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+      block:
+
+        - name: Change scheme to internal (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is changed
+
+        - name: Change scheme to internal
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+
+        - assert:
+            that:
+              - result is changed
+              - result.elb.scheme == 'internal'
+
+        - name: Change scheme to internal idempotency (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is not changed
+
+        - name: Change scheme to internal idempotency
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+
+        - assert:
+            that:
+              - result is not changed
+              - result.elb.scheme == 'internal'
+
+        - name: No scheme specified (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+            scheme: '{{ omit }}'
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is not changed
+
+        - name: No scheme specified
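+        # '{{ omit }}' drops the option entirely, so the module should leave
+        # the existing scheme untouched rather than apply any default.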
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+            scheme: '{{ omit }}'
+          register: result
+
+        - assert:
+            that:
+              - result is not changed
+              - result.elb.scheme == 'internal'
+
+    # For creation, test some basic behaviour
+    - module_defaults:
+        elb_classic_lb:
+          zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+          listeners: '{{ default_listeners }}'
+          health_check: '{{ default_health_check }}'
+          wait: true
+          scheme: 'internet-facing'
+          # subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+      block:
+
+        - name: Change scheme to internet-facing (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is changed
+
+        - name: Change scheme to internet-facing
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+
+        - assert:
+            that:
+              - result is changed
+              - result.elb.scheme == 'internet-facing'
+
+        - name: Change scheme to internet-facing idempotency (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is not changed
+
+        - name: Change scheme to internet-facing idempotency
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+          register: result
+
+        - assert:
+            that:
+              - result is not changed
+              - result.elb.scheme == 'internet-facing'
+
+        - name: No scheme specified (check_mode)
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+            scheme: '{{ omit }}'
+          register: result
+          check_mode: true
+
+        - assert:
+            that:
+              - result is not changed
+
+        - name: No scheme specified
+          elb_classic_lb:
+            name: "{{ elb_name }}"
+            state: present
+            scheme: '{{ omit }}'
+          register: result
+
+        - assert:
+            that:
+              - result is not changed
+              - result.elb.scheme == 'internet-facing'
+
+  always:
+
+    # ============================================================
+    - name: remove the test load balancer
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: absent
+        wait: true
+      register: result
+      ignore_errors: true
diff --git a/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml b/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml
new file mode 100644
index 00000000000..beabf0ebc0e
--- /dev/null
+++ b/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml
@@ -0,0 +1,33 @@
+---
+- name: Get a list of images
+  ec2_ami_info:
+    filters:
+      owner-alias: amazon
+      name: "amzn2-ami-minimal-hvm-*"
+      description: "Amazon Linux 2 AMI *"
+  register: images_info
+
+- name: Create instance a
+  ec2_instance:
+    name: "ansible-test-{{ tiny_prefix }}-elb-a"
+    image_id: "{{ images_info.images | sort(attribute='creation_date') | reverse | first | json_query('image_id') }}"
+    vpc_subnet_id: "{{ subnet_a }}"
+    instance_type: t2.micro
+    wait: false
+    security_group: "{{ sg_a }}"
+  register: ec2_instance_a
+
+- name: Create instance b
+  ec2_instance:
+    name: "ansible-test-{{ tiny_prefix }}-elb-b"
+    image_id: "{{ images_info.images | sort(attribute='creation_date') | reverse | first | json_query('image_id') }}"
+    vpc_subnet_id: "{{ subnet_b }}"
+    instance_type: t2.micro
+    wait: false
+    security_group: "{{ sg_b }}"
+  register: ec2_instance_b
+
+- name: store the Instance IDs
+  set_fact:
+    instance_a: "{{ ec2_instance_a.instance_ids[0] }}"
+    instance_b: "{{ ec2_instance_b.instance_ids[0] }}"
diff --git a/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml b/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml
new file mode 100644
index 00000000000..60e9c73ccde
---
/dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml @@ -0,0 +1,26 @@ +--- +- name: Create S3 bucket for access logs + vars: + s3_logging_bucket: '{{ s3_logging_bucket_a }}' + s3_bucket: + name: '{{ s3_logging_bucket_a }}' + state: present + policy: "{{ lookup('template','s3_policy.j2') }}" + register: logging_bucket + +- assert: + that: + - logging_bucket is changed + +- name: Create S3 bucket for access logs + vars: + s3_logging_bucket: '{{ s3_logging_bucket_b }}' + s3_bucket: + name: '{{ s3_logging_bucket_b }}' + state: present + policy: "{{ lookup('template','s3_policy.j2') }}" + register: logging_bucket + +- assert: + that: + - logging_bucket is changed diff --git a/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml b/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml new file mode 100644 index 00000000000..7e35e1d9e86 --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml @@ -0,0 +1,103 @@ +--- +# SETUP: vpc, subnet, security group +- name: create a VPC to work in + ec2_vpc_net: + cidr_block: '{{ vpc_cidr }}' + state: present + name: '{{ resource_prefix }}' + resource_tags: + Name: '{{ resource_prefix }}' + register: setup_vpc + +- name: create a subnet + ec2_vpc_subnet: + az: '{{ availability_zone_a }}' + tags: '{{ resource_prefix }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr_1 }}' + state: present + resource_tags: + Name: '{{ resource_prefix }}-a' + register: setup_subnet_1 + +- name: create a subnet + ec2_vpc_subnet: + az: '{{ availability_zone_b }}' + tags: '{{ resource_prefix }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr_2 }}' + state: present + resource_tags: + Name: '{{ resource_prefix }}-b' + register: setup_subnet_2 + +- name: create a subnet + ec2_vpc_subnet: + az: '{{ availability_zone_c }}' + tags: '{{ resource_prefix }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr_3 }}' + state: present + resource_tags: + Name: '{{ resource_prefix }}-c' + register: setup_subnet_3 + +- name: create a subnet + ec2_vpc_subnet: + az: '{{ availability_zone_a }}' + tags: '{{ resource_prefix }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: '{{ subnet_cidr_4 }}' + state: present + resource_tags: + Name: '{{ resource_prefix }}-a2' + register: setup_subnet_4 + +- name: create a security group + ec2_group: + name: '{{ resource_prefix }}-a' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: '{{ vpc_cidr }}' + register: setup_sg_1 + +- name: create a security group + ec2_group: + name: '{{ resource_prefix }}-b' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: '{{ vpc_cidr }}' + register: setup_sg_2 + +- name: create a security group + ec2_group: + name: '{{ resource_prefix }}-c' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: '{{ vpc_cidr }}' + register: setup_sg_3 + +- name: store the IDs + set_fact: + subnet_a: "{{ setup_subnet_1.subnet.id }}" + subnet_b: "{{ setup_subnet_2.subnet.id }}" + subnet_c: "{{ setup_subnet_3.subnet.id }}" + subnet_a2: "{{ setup_subnet_4.subnet.id }}" + sg_a: "{{ setup_sg_1.group_id }}" + sg_b: "{{ setup_sg_2.group_id }}" + sg_c: "{{ setup_sg_3.group_id }}" diff --git 
a/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml
new file mode 100644
index 00000000000..6644cf9833c
--- /dev/null
+++ b/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml
@@ -0,0 +1,79 @@
+---
+- block:
+    ## Setup an ELB for testing changing one thing at a time
+    - name: Create ELB
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: present
+        # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
+        listeners: '{{ default_listeners }}'
+        health_check: '{{ default_health_check }}'
+        wait: true
+        scheme: 'internal'
+        subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+        security_group_ids: ['{{ sg_a }}']
+        tags: '{{ default_tags }}'
+        cross_az_load_balancing: True
+        idle_timeout: '{{ default_idle_timeout }}'
+        connection_draining_timeout: '{{ default_drain_timeout }}'
+        access_logs:
+          interval: '{{ default_logging_interval }}'
+          s3_location: '{{ s3_logging_bucket_a }}'
+          s3_prefix: '{{ default_logging_prefix }}'
+          enabled: true
+      register: result
+
+    - name: Verify that simple parameters were set
+      assert:
+        that:
+          - result is changed
+          - result.elb.status == "created"
+          - availability_zone_a in result.elb.zones
+          - availability_zone_b in result.elb.zones
+          - subnet_a in result.elb.subnets
+          - subnet_b in result.elb.subnets
+          - default_listener_tuples[0] in result.elb.listeners
+          - default_listener_tuples[1] in result.elb.listeners
+          - sg_a in result.elb.security_group_ids
+          - sg_b not in result.elb.security_group_ids
+          - sg_c not in result.elb.security_group_ids
+          - result.elb.health_check.healthy_threshold == default_health_check['healthy_threshold']
+          - result.elb.health_check.interval == default_health_check['interval']
+          - result.elb.health_check.target == default_health_check_target
+          - result.elb.health_check.timeout == default_health_check['response_timeout']
+          - result.elb.health_check.unhealthy_threshold == default_health_check['unhealthy_threshold']
+          - result.elb.tags == default_tags
+          - result.elb.cross_az_load_balancing == 'yes'
+          - result.elb.idle_timeout == default_idle_timeout
+          - result.elb.connection_draining_timeout == default_drain_timeout
+          - result.elb.proxy_policy == None
+          - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval
+          - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a
+          - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix
+          - result.load_balancer.load_balancer_attributes.access_log.enabled == True
+
+    ## AZ / subnet changes are tested in the public/internal tests
+    ## because they depend on the scheme of the LB
+
+    - include_tasks: 'simple_securitygroups.yml'
+    - include_tasks: 'simple_listeners.yml'
+    - include_tasks: 'simple_healthcheck.yml'
+    - include_tasks: 'simple_tags.yml'
+    - include_tasks: 'simple_cross_az.yml'
+    - include_tasks: 'simple_idle_timeout.yml'
+    - include_tasks: 'simple_draining_timeout.yml'
+    - include_tasks: 'simple_proxy_policy.yml'
+    - include_tasks: 'simple_stickiness.yml'
+    - include_tasks: 'simple_instances.yml'
+    - include_tasks: 'simple_logging.yml'
+
+  always:
+
+    # ============================================================
+    - name: remove the test load balancer
+      elb_classic_lb:
+        name: "{{ elb_name }}"
+        state: absent
+        wait: true
+      register: result
+      ignore_errors: true
diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml
b/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml new file mode 100644 index 00000000000..104b0afb5fa --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml @@ -0,0 +1,100 @@ +--- +# =========================================================== + +- name: disable cross-az balancing on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: False + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: disable cross-az balancing on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: False + register: result + +- assert: + that: + - result is changed + - result.elb.cross_az_load_balancing == 'no' + +- name: disable cross-az balancing on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: False + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: disable cross-az balancing on ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: False + register: result + +- assert: + that: + - result is not changed + - result.elb.cross_az_load_balancing == 'no' + +# =========================================================== + +- name: re-enable cross-az balancing on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: True + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: re-enable cross-az balancing on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: True + register: result + +- assert: + that: + - result is changed + - result.elb.cross_az_load_balancing == 'yes' + +- name: re-enable cross-az balancing on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: True + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: re-enable cross-az balancing on ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + cross_az_load_balancing: True + register: result + +- assert: + that: + - result is not changed + - result.elb.cross_az_load_balancing == 'yes' diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml new file mode 100644 index 00000000000..825ce2185fb --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml @@ -0,0 +1,148 @@ +--- +# =========================================================== + +- name: disable connection draining on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: 0 + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: disable connection draining on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: 0 + register: result + +- assert: + that: + - result is changed + +- name: disable connection draining on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + connection_draining_timeout: 0 + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: disable connection draining on ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: 
present
+    connection_draining_timeout: 0
+  register: result
+
+- assert:
+    that:
+      - result is not changed
+
+# ===========================================================
+
+- name: re-enable connection draining on ELB (check_mode)
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    connection_draining_timeout: '{{ default_drain_timeout }}'
+  register: result
+  check_mode: true
+
+- assert:
+    that:
+      - result is changed
+
+- name: re-enable connection draining on ELB
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    connection_draining_timeout: '{{ default_drain_timeout }}'
+  register: result
+
+- assert:
+    that:
+      - result is changed
+      - result.elb.connection_draining_timeout == default_drain_timeout
+
+- name: re-enable connection draining on ELB - idempotency (check_mode)
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    connection_draining_timeout: '{{ default_drain_timeout }}'
+  register: result
+  check_mode: true
+
+- assert:
+    that:
+      - result is not changed
+
+- name: re-enable connection draining on ELB - idempotency
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    connection_draining_timeout: '{{ default_drain_timeout }}'
+  register: result
+
+- assert:
+    that:
+      - result is not changed
+      - result.elb.connection_draining_timeout == default_drain_timeout
+
+# ===========================================================
+
+- name: update connection draining timeout on ELB (check_mode)
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    connection_draining_timeout: '{{ updated_drain_timeout }}'
+  register: result
+  check_mode: true
+
+- assert:
+    that:
+      - result is changed
+
+- name: update connection draining timeout on ELB
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    connection_draining_timeout: '{{ updated_drain_timeout }}'
+  register: result
+
+- assert:
+    that:
+      - result is changed
+      - result.elb.connection_draining_timeout == updated_drain_timeout
+
+- name: update connection draining timeout on ELB - idempotency (check_mode)
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    connection_draining_timeout: '{{ updated_drain_timeout }}'
+  register: result
+  check_mode: true
+
+- assert:
+    that:
+      - result is not changed
+
+- name: update connection draining timeout on ELB - idempotency
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    connection_draining_timeout: '{{ updated_drain_timeout }}'
+  register: result
+
+- assert:
+    that:
+      - result is not changed
+      - result.elb.connection_draining_timeout == updated_drain_timeout
diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml
new file mode 100644
index 00000000000..179e8cb80ef
--- /dev/null
+++ b/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml
@@ -0,0 +1,116 @@
+---
+# Note: AWS doesn't support disabling health checks
+# ==============================================================
+- name: Non-HTTP Healthcheck (check_mode)
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    health_check: '{{ nonhttp_health_check }}'
+  register: result
+  check_mode: true
+
+- assert:
+    that:
+      - result is changed
+
+- name: Non-HTTP Healthcheck
+  elb_classic_lb:
+    name: "{{ elb_name }}"
+    state: present
+    health_check: '{{ nonhttp_health_check }}'
+  register: result
+
+- assert:
+    that:
+      - result is changed
+      - result.elb.health_check.healthy_threshold == nonhttp_health_check['healthy_threshold']
+      -
result.elb.health_check.interval == nonhttp_health_check['interval'] + - result.elb.health_check.target == nonhttp_health_check_target + - result.elb.health_check.timeout == nonhttp_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == nonhttp_health_check['unhealthy_threshold'] + +- name: Non-HTTP Healthcheck - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ nonhttp_health_check }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Non-HTTP Healthcheck - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ nonhttp_health_check }}' + register: result + +- assert: + that: + - result is not changed + - result.elb.health_check.healthy_threshold == nonhttp_health_check['healthy_threshold'] + - result.elb.health_check.interval == nonhttp_health_check['interval'] + - result.elb.health_check.target == nonhttp_health_check_target + - result.elb.health_check.timeout == nonhttp_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == nonhttp_health_check['unhealthy_threshold'] + +# ============================================================== + +- name: Update Healthcheck (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ updated_health_check }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update Healthcheck + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ updated_health_check }}' + register: result + +- assert: + that: + - result is changed + - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold'] + - result.elb.health_check.interval == updated_health_check['interval'] + - result.elb.health_check.target == updated_health_check_target + - result.elb.health_check.timeout == updated_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold'] + +- name: Update Healthcheck - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ updated_health_check }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update Healthcheck - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + health_check: '{{ updated_health_check }}' + register: result + +- assert: + that: + - result is not changed + - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold'] + - result.elb.health_check.interval == updated_health_check['interval'] + - result.elb.health_check.target == updated_health_check_target + - result.elb.health_check.timeout == updated_health_check['response_timeout'] + - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold'] diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml new file mode 100644 index 00000000000..e89dd25f18c --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml @@ -0,0 +1,50 @@ +--- +# =========================================================== + +- name: update idle connection timeout on ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + idle_timeout: "{{ updated_idle_timeout }}" + register: result + check_mode: true + +- assert: + 
that: + - result is changed + +- name: update idle connection timeout on ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + idle_timeout: "{{ updated_idle_timeout }}" + register: result + +- assert: + that: + - result is changed + - result.elb.idle_timeout == updated_idle_timeout + +- name: update idle connection timeout on ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + idle_timeout: "{{ updated_idle_timeout }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: update idle connection timeout on ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + idle_timeout: "{{ updated_idle_timeout }}" + register: result + +- assert: + that: + - result is not changed + - result.elb.idle_timeout == updated_idle_timeout diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml new file mode 100644 index 00000000000..a973d68dae1 --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml @@ -0,0 +1,411 @@ +--- +- name: Add SSH listener and health check to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ ssh_listeners }}" + health_check: "{{ ssh_health_check }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - ssh_listener_tuples[0] in result.elb.listeners + +# Make sure that the instances are 'OK' + +- name: Wait for instance a + ec2_instance: + name: "ansible-test-{{ tiny_prefix }}-elb-a" + vpc_subnet_id: "{{ subnet_a }}" + instance_type: t2.micro + wait: true + security_group: "{{ sg_a }}" + register: ec2_instance_a + +- name: Wait for instance b + ec2_instance: + name: "ansible-test-{{ tiny_prefix }}-elb-b" + vpc_subnet_id: "{{ subnet_b }}" + instance_type: t2.micro + wait: true + security_group: "{{ sg_b }}" + register: ec2_instance_b + +- assert: + that: + - ec2_instance_a is successful + - ec2_instance_b is successful + +# ============================================================== + +- name: Add an instance to the LB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + wait: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Add an instance to the LB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + wait: true + register: result + +- assert: + that: + - result is changed + - instance_a in result.elb.instances + - instance_b not in result.elb.instances + +- name: Add an instance to the LB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + wait: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Add an instance to the LB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + wait: true + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b not in result.elb.instances + +# ============================================================== + +- name: Add second instance to the LB without purge (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: false + register: result + check_mode: true + +- 
assert: + that: + - result is changed + +- name: Add second instance to the LB without purge + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: false + register: result + +- assert: + that: + - result is changed + - instance_a in result.elb.instances + - instance_b in result.elb.instances + +- name: Add second instance to the LB without purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Add second instance to the LB without purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: false + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b in result.elb.instances + +# ============================================================== + +- name: Both instances with purge - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + - '{{ instance_b }}' + purge_instance_ids: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Both instances with purge - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + - '{{ instance_b }}' + purge_instance_ids: true + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b in result.elb.instances + +- name: Both instances with purge - different order - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + - '{{ instance_a }}' + purge_instance_ids: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Both instances with purge - different order - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + - '{{ instance_a }}' + purge_instance_ids: true + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b in result.elb.instances + +# ============================================================== + +- name: Remove first instance from LB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + wait: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Remove first instance from LB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + wait: true + register: result + +- assert: + that: + - result is changed + - instance_a not in result.elb.instances + - instance_b in result.elb.instances + +- name: Remove first instance from LB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + wait: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Remove first instance from LB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + wait: true + register: result + 
+- assert: + that: + - result is not changed + - instance_a not in result.elb.instances + - instance_b in result.elb.instances + +# ============================================================== + +- name: Switch instances in LB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + purge_instance_ids: true + wait: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Switch instances in LB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + purge_instance_ids: true + wait: true + register: result + +- assert: + that: + - result is changed + - instance_a in result.elb.instances + - instance_b not in result.elb.instances + +- name: Switch instances in LB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + purge_instance_ids: true + wait: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Switch instances in LB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_a }}' + purge_instance_ids: true + wait: true + register: result + +- assert: + that: + - result is not changed + - instance_a in result.elb.instances + - instance_b not in result.elb.instances + +# ============================================================== + +- name: Switch instances in LB - no wait (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Switch instances in LB - no wait + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + register: result + +- assert: + that: + - result is changed + - instance_a not in result.elb.instances + - instance_b in result.elb.instances + +- name: Switch instances in LB - no wait - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Switch instances in LB - no wait - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + instance_ids: + - '{{ instance_b }}' + purge_instance_ids: true + register: result + +- assert: + that: + - result is not changed + - instance_a not in result.elb.instances + - instance_b in result.elb.instances diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml new file mode 100644 index 00000000000..8edb96543ab --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml @@ -0,0 +1,196 @@ +--- +# =========================================================== +# remove a listener (no purge) +# remove a listener (purge) +# add a listener +# update a listener (same port) +# =========================================================== +# Test passing only one of the listeners +# Without purge +- name: Test partial Listener to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Test partial 
Listener to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: false + register: result + +- assert: + that: + - result is not changed + - default_listener_tuples[0] in result.elb.listeners + - default_listener_tuples[1] in result.elb.listeners + +# With purge +- name: Test partial Listener with purge to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Test partial Listener with purge to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: true + register: result + +- assert: + that: + - result is changed + - purged_listener_tuples[0] in result.elb.listeners + +- name: Test partial Listener with purge to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: true + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Test partial Listener with purge to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ purged_listeners }}" + purge_listeners: true + register: result + +- assert: + that: + - result is not changed + - purged_listener_tuples[0] in result.elb.listeners + +# =========================================================== +# Test re-adding a listener +- name: Test re-adding listener to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ default_listeners }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Test re-adding listener to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ default_listeners }}" + register: result + +- assert: + that: + - result is changed + - default_listener_tuples[0] in result.elb.listeners + - default_listener_tuples[1] in result.elb.listeners + +- name: Test re-adding listener to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ default_listeners }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Test re-adding listener to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ default_listeners }}" + register: result + +- assert: + that: + - result is not changed + - default_listener_tuples[0] in result.elb.listeners + - default_listener_tuples[1] in result.elb.listeners + +# =========================================================== +# Test passing an updated listener +- name: Test updated listener to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ updated_listeners }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Test updated listener to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ updated_listeners }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - updated_listener_tuples[0] in result.elb.listeners + - updated_listener_tuples[1] in result.elb.listeners + +- name: Test updated listener to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ 
updated_listeners }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Test updated listener to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ updated_listeners }}" + purge_listeners: false + register: result + +- assert: + that: + - result is not changed + - updated_listener_tuples[0] in result.elb.listeners + - updated_listener_tuples[1] in result.elb.listeners diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml new file mode 100644 index 00000000000..5e489eaf018 --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml @@ -0,0 +1,587 @@ +--- +# =========================================================== + +- name: S3 logging for ELB - implied enabled (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: S3 logging for ELB - implied enabled + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Disable S3 logging for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Disable S3 logging for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.enabled == False + +- name: Disable S3 logging for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable S3 logging for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.enabled == False + +# 
=========================================================== + +- name: Disable S3 logging for ELB - ignore extras (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable S3 logging for ELB - ignore extras + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.enabled == False + +- name: Disable S3 logging for ELB - no extras (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable S3 logging for ELB - no extras + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: false + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.enabled == False + +# =========================================================== + +- name: Re-enable S3 logging for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Re-enable S3 logging for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Re-enable S3 logging for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Re-enable S3 logging for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ default_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - 
result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Update ELB Log delivery interval for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update ELB Log delivery interval for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Update ELB Log delivery interval for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update ELB Log delivery interval for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_a }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_a + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Update S3 Logging Location for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update S3 Logging Location for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Update S3 Logging Location for ELB - idempotency 
(check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update S3 Logging Location for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ default_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == default_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Update S3 Logging Prefix for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update S3 Logging Prefix for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == updated_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Update S3 Logging Prefix for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update S3 Logging Prefix for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '{{ updated_logging_prefix }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == updated_logging_prefix + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Empty S3 Logging Prefix for ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval 
}}' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Empty S3 Logging Prefix for ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + +- assert: + that: + - result is changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == '' + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Empty S3 Logging Prefix for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Empty S3 Logging Prefix for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == '' + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +- name: Empty string S3 Logging Prefix for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_prefix: '' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Empty string S3 Logging Prefix for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + interval: '{{ updated_logging_interval }}' + s3_prefix: '' + s3_location: '{{ s3_logging_bucket_b }}' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == '' + - result.load_balancer.load_balancer_attributes.access_log.enabled == True + +# =========================================================== + +- name: Omit S3 Logging interval for ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '' + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Omit S3 Logging interval for ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + access_logs: + enabled: true + s3_location: '{{ s3_logging_bucket_b }}' + s3_prefix: '' + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.load_balancer_attributes.access_log.emit_interval == 60 + - 
result.load_balancer.load_balancer_attributes.access_log.s3_bucket_name == s3_logging_bucket_b + - result.load_balancer.load_balancer_attributes.access_log.s3_bucket_prefix == '' + - result.load_balancer.load_balancer_attributes.access_log.enabled == True diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml new file mode 100644 index 00000000000..50c5ce519cc --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml @@ -0,0 +1,141 @@ +--- +# =========================================================== +- name: Enable proxy protocol on a listener (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Enable proxy protocol on a listener + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - result.elb.proxy_policy == "ProxyProtocol-policy" + - result.load_balancer.backend_server_descriptions | length == 1 + - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"] + +- name: Enable proxy protocol on a listener - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Enable proxy protocol on a listener - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is not changed + - result.elb.proxy_policy == "ProxyProtocol-policy" + - result.load_balancer.backend_server_descriptions | length == 1 + - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"] + +# =========================================================== + +- name: Disable proxy protocol on a listener (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ unproxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Disable proxy protocol on a listener + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ unproxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - result.load_balancer.backend_server_descriptions | length == 0 + +- name: Disable proxy protocol on a listener - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ unproxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable proxy protocol on a listener - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ unproxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is not changed + - result.load_balancer.backend_server_descriptions | length == 0 + +# =========================================================== + +- name: Re-enable proxy protocol on a listener (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ 
proxied_listener }}" + purge_listeners: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Re-enable proxy protocol on a listener + elb_classic_lb: + name: "{{ elb_name }}" + state: present + listeners: "{{ proxied_listener }}" + purge_listeners: false + register: result + +- assert: + that: + - result is changed + - result.elb.proxy_policy == "ProxyProtocol-policy" + - result.load_balancer.backend_server_descriptions | length == 1 + - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"] diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml new file mode 100644 index 00000000000..21a56d79265 --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml @@ -0,0 +1,106 @@ +--- +- name: Assign Security Groups to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_ids: ['{{ sg_b }}'] + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Assign Security Groups to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_ids: ['{{ sg_b }}'] + register: result + +- assert: + that: + - result is changed + - sg_a not in result.elb.security_group_ids + - sg_b in result.elb.security_group_ids + - sg_c not in result.elb.security_group_ids + +- name: Assign Security Groups to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_ids: ['{{ sg_b }}'] + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Assign Security Groups to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_ids: ['{{ sg_b }}'] + register: result + +- assert: + that: + - result is not changed + - sg_a not in result.elb.security_group_ids + - sg_b in result.elb.security_group_ids + - sg_c not in result.elb.security_group_ids + +#===================================================================== + +- name: Assign Security Groups to ELB by name (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Assign Security Groups to ELB by name + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + register: result + +- assert: + that: + - result is changed + - sg_a in result.elb.security_group_ids + - sg_b not in result.elb.security_group_ids + - sg_c in result.elb.security_group_ids + +- name: Assign Security Groups to ELB by name - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Assign Security Groups to ELB by name - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + register: result + +- assert: + that: + - result is not changed + - sg_a in result.elb.security_group_ids + - sg_b not in result.elb.security_group_ids + - sg_c in result.elb.security_group_ids diff --git 
a/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml new file mode 100644 index 00000000000..9c0f925ecf9 --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml @@ -0,0 +1,390 @@ +--- +# ============================================================== +- name: App Cookie Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: App Cookie Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: App Cookie Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: App Cookie Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + +- assert: + that: + - result is not changed + +# ============================================================== +- name: Update App Cookie Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update App Cookie Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_app_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: Update App Cookie Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update App Cookie Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_app_stickiness }}" + register: result + +- assert: + that: + - result is not changed + + +# ============================================================== + +- name: Disable Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Disable Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + +- assert: + that: + - result is changed + +- name: Disable Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + +- assert: + that: + - result is not changed + +# ============================================================== + +- name: Re-enable App Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Re-enable App Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ 
app_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: Re-enable App Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Re-enable App Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ app_stickiness }}" + register: result + +- assert: + that: + - result is not changed + +# ============================================================== +- name: LB Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: LB Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ lb_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: LB Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: LB Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ lb_stickiness }}" + register: result + +- assert: + that: + - result is not changed + +# ============================================================== +- name: Update LB Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Update LB Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: Update LB Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Update LB Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + +- assert: + that: + - result is not changed + + +# ============================================================== + +- name: Disable Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Disable Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + +- assert: + that: + - result is changed + +- name: Disable Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Disable Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: + enabled: false + register: result + +- assert: + that: + - result is not changed + +# ============================================================== + +- name: Re-enable LB Stickiness (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + check_mode: true 
+ +- assert: + that: + - result is changed + +- name: Re-enable LB Stickiness + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + +- assert: + that: + - result is changed + +- name: Re-enable LB Stickiness - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Re-enable LB Stickiness - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + stickiness: "{{ updated_lb_stickiness }}" + register: result + +- assert: + that: + - result is not changed diff --git a/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml b/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml new file mode 100644 index 00000000000..b78eb1c583f --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml @@ -0,0 +1,141 @@ +--- +# =========================================================== +# partial tags (no purge) +# update tags (no purge) +# update tags (with purge) +# =========================================================== +- name: Pass partial tags to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ partial_tags }}" + purge_tags: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Pass partial tags to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ partial_tags }}" + purge_tags: false + register: result + +- assert: + that: + - result is not changed + - result.elb.tags == default_tags + +# =========================================================== + +- name: Add tags to ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: false + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Add tags to ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: false + register: result + +- assert: + that: + - result is changed + - result.elb.tags == ( default_tags | combine(updated_tags) ) + +- name: Add tags to ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: false + register: result + check_mode: true + +- assert: + that: + - result is not changed + +- name: Add tags to ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: false + register: result + +- assert: + that: + - result is not changed + - result.elb.tags == ( default_tags | combine(updated_tags) ) + +# =========================================================== + +- name: Purge tags from ELB (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: true + register: result + check_mode: true + +- assert: + that: + - result is changed + +- name: Purge tags from ELB + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: true + register: result + +- assert: + that: + - result is changed + - result.elb.tags == updated_tags + +- name: Purge tags from ELB - idempotency (check_mode) + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: true + register: result + check_mode: true + +- assert: + that: + 
- result is not changed + +- name: Purge tags from ELB - idempotency + elb_classic_lb: + name: "{{ elb_name }}" + state: present + tags: "{{ updated_tags }}" + purge_tags: true + register: result + +- assert: + that: + - result is not changed + - result.elb.tags == updated_tags + +# =========================================================== diff --git a/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2 b/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2 new file mode 100644 index 00000000000..ee69dae33fb --- /dev/null +++ b/tests/integration/targets/elb_classic_lb/templates/s3_policy.j2 @@ -0,0 +1,15 @@ +{ + "Version": "2012-10-17", + "Id": "ELB-Logging-Policy", + "Statement": [ + { + "Sid": "ELB-Logging", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ access_log_account_id }}:root" + }, + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::{{ s3_logging_bucket }}/*" + } + ] +} diff --git a/tests/integration/targets/ec2_elb_lb/vars/main.yml b/tests/integration/targets/elb_classic_lb/vars/main.yml similarity index 100% rename from tests/integration/targets/ec2_elb_lb/vars/main.yml rename to tests/integration/targets/elb_classic_lb/vars/main.yml
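Note: the s3_policy.j2 template above grants the ELB access-log account s3:PutObject on the logging bucket; the diff does not show where it is consumed, but a minimal sketch of how a setup task might render it when creating the bucket (assuming s3_logging_bucket and access_log_account_id are supplied by the target's vars, as the template expects) would be:

- name: Create the S3 bucket used for ELB access-log delivery
  s3_bucket:
    name: "{{ s3_logging_bucket }}"
    state: present
    # Render the Jinja2 policy template shipped with this target
    policy: "{{ lookup('template', 's3_policy.j2') }}"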