diff --git a/changelogs/fragments/ecs_service_and_ecs_integration_test.yml b/changelogs/fragments/ecs_service_and_ecs_integration_test.yml new file mode 100644 index 00000000000..a0788a18d41 --- /dev/null +++ b/changelogs/fragments/ecs_service_and_ecs_integration_test.yml @@ -0,0 +1,8 @@ +minor_changes: + - ecs_service - new parameter ``purge_placement_strategy`` to have the ability to remove the placement strategy of an ECS Service (https://github.com/ansible-collections/community.aws/pull/1716). + - ecs_service - new parameter ``purge_placement_constraints`` to have the ability to remove the placement constraints of an ECS Service (https://github.com/ansible-collections/community.aws/pull/1716). +trivial: + - ecs_cluster - rework and repair ecs_cluster integration test. +deprecated_features: + - ecs_service - In a release after 2024-06-01, the default value of ``purge_placement_strategy`` will be changed from ``false`` to ``true`` (https://github.com/ansible-collections/community.aws/pull/1716). + - ecs_service - In a release after 2024-06-01, the default value of ``purge_placement_constraints`` will be changed from ``false`` to ``true`` (https://github.com/ansible-collections/community.aws/pull/1716). \ No newline at end of file diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py index a5af0df79fd..24df26838f3 100644 --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -148,6 +148,14 @@ description: A cluster query language expression to apply to the constraint. required: false type: str + purge_placement_constraints: + version_added: 5.3.0 + description: + - Toggle overwriting of existing placement constraints. This is needed for backwards compatibility. + - By default I(purge_placement_constraints=false). In a release after 2024-06-01 this will be changed to I(purge_placement_constraints=true). + required: false + type: bool + default: false placement_strategy: description: - The placement strategy objects to use for tasks in your service. You can specify a maximum of 5 strategy rules per service. @@ -162,6 +170,14 @@ field: description: The field to apply the placement strategy against. type: str + purge_placement_strategy: + version_added: 5.3.0 + description: + - Toggle overwriting of existing placement strategy. This is needed for backwards compatibility. + - By default I(purge_placement_strategy=false). In a release after 2024-06-01 this will be changed to I(purge_placement_strategy=true). + required: false + type: bool + default: false force_deletion: description: - Forcibly delete the service. Required when deleting a service with >0 scale, or no target group. @@ -396,7 +412,9 @@ returned: always type: int loadBalancers: - description: A list of load balancer objects + description: + - A list of load balancer objects + - Updating the load balancer configuration of an existing service requires botocore>=1.24.14.
returned: always type: complex contains: @@ -822,7 +840,8 @@ def create_service(self, service_name, cluster_name, task_definition, load_balan def update_service(self, service_name, cluster_name, task_definition, desired_count, deployment_configuration, placement_constraints, placement_strategy, network_configuration, health_check_grace_period_seconds, - force_new_deployment, capacity_provider_strategy, load_balancers): + force_new_deployment, capacity_provider_strategy, load_balancers, + purge_placement_constraints, purge_placement_strategy): params = dict( cluster=cluster_name, service=service_name, @@ -834,9 +853,15 @@ def update_service(self, service_name, cluster_name, task_definition, desired_co params['placementConstraints'] = [{key: value for key, value in constraint.items() if value is not None} for constraint in placement_constraints] + if purge_placement_constraints and not placement_constraints: + params['placementConstraints'] = [] + if placement_strategy: params['placementStrategy'] = placement_strategy + if purge_placement_strategy and not placement_strategy: + params['placementStrategy'] = [] + if network_configuration: params['networkConfiguration'] = network_configuration if force_new_deployment: @@ -907,6 +932,7 @@ def main(): expression=dict(required=False, type='str') ) ), + purge_placement_constraints=dict(required=False, default=False, type='bool'), placement_strategy=dict( required=False, default=[], @@ -917,6 +943,7 @@ def main(): field=dict(type='str'), ) ), + purge_placement_strategy=dict(required=False, default=False, type='bool'), health_check_grace_period_seconds=dict(required=False, type='int'), network_configuration=dict(required=False, type='dict', options=dict( subnets=dict(type='list', elements='str'), @@ -1061,6 +1088,8 @@ def main(): module.params['force_new_deployment'], capacityProviders, updatedLoadBalancers, + module.params['purge_placement_constraints'], + module.params['purge_placement_strategy'], ) else: diff --git a/tests/integration/targets/ecs_cluster/aliases b/tests/integration/targets/ecs_cluster/aliases index 8f38aa6e6ec..3a010d0edd3 100644 --- a/tests/integration/targets/ecs_cluster/aliases +++ b/tests/integration/targets/ecs_cluster/aliases @@ -1,6 +1,4 @@ -# reason: slow -# Tests take around 15 minutes to run -unsupported +time=20m cloud/aws diff --git a/tests/integration/targets/ecs_cluster/defaults/main.yml b/tests/integration/targets/ecs_cluster/defaults/main.yml index 368ab927187..77a9efb07a0 100644 --- a/tests/integration/targets/ecs_cluster/defaults/main.yml +++ b/tests/integration/targets/ecs_cluster/defaults/main.yml @@ -4,6 +4,8 @@ user_data: | echo ECS_CLUSTER={{ ecs_cluster_name }} >> /etc/ecs/ecs.config ecs_service_name: "{{ resource_prefix }}-service" +ecs_service_role_name: "ansible-test-ecsServiceRole-{{ tiny_prefix }}" +ecs_task_role_name: "ansible-test-ecsServiceRole-task-{{ tiny_prefix }}" ecs_task_image_path: nginx ecs_task_name: "{{ resource_prefix }}-task" ecs_task_memory: 128 diff --git a/tests/integration/targets/ecs_cluster/meta/main.yml b/tests/integration/targets/ecs_cluster/meta/main.yml index 32cf5dda7ed..7f42526eb30 100644 --- a/tests/integration/targets/ecs_cluster/meta/main.yml +++ b/tests/integration/targets/ecs_cluster/meta/main.yml @@ -1 +1,4 @@ -dependencies: [] +dependencies: + - role: setup_botocore_pip + vars: + botocore_version: "1.24.14" diff --git a/tests/integration/targets/ecs_cluster/tasks/01_create_requirements.yml b/tests/integration/targets/ecs_cluster/tasks/01_create_requirements.yml new 
file mode 100644 index 00000000000..ea2709a617d --- /dev/null +++ b/tests/integration/targets/ecs_cluster/tasks/01_create_requirements.yml @@ -0,0 +1,147 @@ +- name: ensure IAM service role exists + iam_role: + name: "{{ ecs_service_role_name }}" + assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" + state: present + create_instance_profile: yes + managed_policy: + - AmazonEC2ContainerServiceRole + wait: True + +- name: ensure AWSServiceRoleForECS role exists + iam_role_info: + name: AWSServiceRoleForECS + register: iam_role_result + +# # This should happen automatically with the right permissions... +#- name: fail if AWSServiceRoleForECS role does not exist +# fail: +# msg: > +# Run `aws iam create-service-linked-role --aws-service-name=ecs.amazonaws.com ` to create +# a linked role for AWS VPC load balancer management +# when: not iam_role_result.iam_roles + +- name: create a VPC to work in + ec2_vpc_net: + cidr_block: 10.0.0.0/16 + state: present + name: '{{ resource_prefix }}_ecs_cluster' + resource_tags: + Name: '{{ resource_prefix }}_ecs_cluster' + register: setup_vpc + +- name: create a key pair to use for creating an ec2 instance + ec2_key: + name: '{{ resource_prefix }}_ecs_cluster' + state: present + when: ec2_keypair is not defined # allow override in cloud-config-aws.ini + register: setup_key + +- name: create subnets + ec2_vpc_subnet: + az: '{{ aws_region }}{{ item.zone }}' + tags: + Name: '{{ resource_prefix }}_ecs_cluster-subnet-{{ item.zone }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: "{{ item.cidr }}" + state: present + register: setup_subnet + with_items: + - zone: a + cidr: 10.0.1.0/24 + - zone: b + cidr: 10.0.2.0/24 + +- name: create an internet gateway so that ECS agents can talk to ECS + ec2_vpc_igw: + vpc_id: '{{ setup_vpc.vpc.id }}' + state: present + register: igw + +- name: create a security group to use for creating an ec2 instance + ec2_group: + name: '{{ resource_prefix }}_ecs_cluster-sg' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: # allow all ssh traffic but nothing else + - ports: 22 + cidr_ip: 0.0.0.0/0 + register: setup_sg + +- set_fact: + # As a lookup plugin we don't have access to module_defaults + connection_args: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token | default(omit) }}" + no_log: True + +- name: set image id fact + set_fact: + ecs_image_id: "{{ lookup('aws_ssm', '/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id', **connection_args) }}" + +- name: provision ec2 instance to create an image + ec2_instance: + key_name: '{{ ec2_keypair|default(setup_key.key.name) }}' + instance_type: t3.micro + state: present + image_id: '{{ ecs_image_id }}' + wait: yes + user_data: "{{ user_data }}" + instance_role: "{{ ecs_service_role_name }}" + tags: + Name: '{{ resource_prefix }}_ecs_agent' + security_group: '{{ setup_sg.group_id }}' + vpc_subnet_id: '{{ setup_subnet.results[0].subnet.id }}' + register: setup_instance + +- name: create target group + elb_target_group: + name: "{{ ecs_target_group_name }}1" + state: present + protocol: HTTP + port: 8080 + modify_targets: no + vpc_id: '{{ setup_vpc.vpc.id }}' + target_type: instance + health_check_interval: 5 + health_check_timeout: 2 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + register: elb_target_group_instance + +- name: create second target group to use ip target_type + 
elb_target_group: + name: "{{ ecs_target_group_name }}2" + state: present + protocol: HTTP + port: 8080 + modify_targets: no + vpc_id: '{{ setup_vpc.vpc.id }}' + target_type: ip + health_check_interval: 5 + health_check_timeout: 2 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + register: elb_target_group_ip + +- name: create load balancer + elb_application_lb: + name: "{{ ecs_load_balancer_name }}" + state: present + scheme: internal + security_groups: '{{ setup_sg.group_id }}' + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ ecs_target_group_name }}1" + - Protocol: HTTP + Port: 81 + DefaultActions: + - Type: forward + TargetGroupName: "{{ ecs_target_group_name }}2" diff --git a/tests/integration/targets/ecs_cluster/tasks/10_ecs_cluster.yml b/tests/integration/targets/ecs_cluster/tasks/10_ecs_cluster.yml new file mode 100644 index 00000000000..f2c868674f8 --- /dev/null +++ b/tests/integration/targets/ecs_cluster/tasks/10_ecs_cluster.yml @@ -0,0 +1,76 @@ +# cluster "{{ ecs_cluster_name }}" is used for ecs_service tests +- name: create an ECS cluster + ecs_cluster: + name: "{{ ecs_cluster_name }}" + state: present + register: ecs_cluster + +- name: check that ecs_cluster changed + assert: + that: + - ecs_cluster.changed + +- name: immutable create same ECS cluster + ecs_cluster: + name: "{{ ecs_cluster_name }}" + state: present + register: ecs_cluster_again + +- name: check that ecs_cluster did not change + assert: + that: + - not ecs_cluster_again.changed + +- name: create an ECS cluster to test capacity provider strategy + ecs_cluster: + name: "{{ ecs_cluster_name }}-cps" + state: present + register: ecs_cluster + +- name: add capacity providers and strategy + ecs_cluster: + name: "{{ ecs_cluster_name }}-cps" + state: present + purge_capacity_providers: True + capacity_providers: + - FARGATE + - FARGATE_SPOT + capacity_provider_strategy: + - capacity_provider: FARGATE + base: 1 + weight: 1 + - capacity_provider: FARGATE_SPOT + weight: 100 + register: ecs_cluster_update + +- name: check that ecs_cluster was correctly updated + assert: + that: + - ecs_cluster_update.changed + - ecs_cluster_update.cluster is defined + - ecs_cluster_update.cluster.capacityProviders is defined + - "'FARGATE' in ecs_cluster_update.cluster.capacityProviders" + +- name: immutable add capacity providers and strategy + ecs_cluster: + name: "{{ ecs_cluster_name }}-cps" + state: present + purge_capacity_providers: True + capacity_providers: + - FARGATE + - FARGATE_SPOT + capacity_provider_strategy: + - capacity_provider: FARGATE + base: 1 + weight: 1 + - capacity_provider: FARGATE_SPOT + weight: 100 + register: ecs_cluster_update + +- name: check that ecs_cluster was correctly updated + assert: + that: + - not ecs_cluster_update.changed + - ecs_cluster_update.cluster is defined + - ecs_cluster_update.cluster.capacityProviders is defined + - "'FARGATE' in ecs_cluster_update.cluster.capacityProviders" diff --git a/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml new file mode 100644 index 00000000000..4f437ade014 --- /dev/null +++ b/tests/integration/targets/ecs_cluster/tasks/20_ecs_service.yml @@ -0,0 +1,909 @@ +- name: create task definition + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}" + state: present + register: ecs_task_definition + +- name: check 
that initial task definition changes + assert: + that: + - ecs_task_definition.changed + +- name: recreate task definition + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}" + state: present + register: ecs_task_definition_again + +- name: check that task definition does not change + assert: + that: + - not ecs_task_definition_again.changed + +- name: obtain ECS task definition facts + ecs_taskdefinition_info: + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + +- name: create ECS service definition + ecs_service: + state: present + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + placement_constraints: + - type: distinctInstance + health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + role: "{{ ecs_service_role_name }}" + register: ecs_service + +- name: check that ECS service creation changed + assert: + that: + - ecs_service.changed + +- name: check that placement constraint has been applied + assert: + that: + - "ecs_service.service.placementConstraints[0].type == 'distinctInstance'" + +- name: check that ECS service was created with deployment_circuit_breaker + assert: + that: + - ecs_service.service.deploymentConfiguration.deploymentCircuitBreaker.enable + - ecs_service.service.deploymentConfiguration.deploymentCircuitBreaker.rollback + +- name: create same ECS service definition (should not change) + ecs_service: + state: present + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + placement_constraints: + - type: distinctInstance + health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + role: "{{ ecs_service_role_name }}" + register: ecs_service_again + +- name: check that ECS service recreation changed nothing + assert: + that: + - not ecs_service_again.changed + +- name: create same ECS service definition via force_new_deployment + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + force_new_deployment: true + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + placement_constraints: + - type: distinctInstance + health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" 
+ role: "{{ ecs_service_role_name }}" + register: ecs_service_again + +- name: check that ECS service recreation changed again due force_new_deployment + assert: + that: + - ecs_service_again.changed + +- name: attempt to use ECS network configuration on task definition without awsvpc network_mode (expected to fail) + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + name: "{{ ecs_service_name }}3" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + register: ecs_service_network_without_awsvpc_task + ignore_errors: true + +- name: assert that using ECS network configuration with non AWSVPC task definition fails + assert: + that: + - ecs_service_network_without_awsvpc_task is failed + +- name: scale down ECS service + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 0 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + role: "{{ ecs_service_role_name }}" + wait: true + register: ecs_service_scale_down + +- name: assert that ECS service is scaled down + assert: + that: + - ecs_service_scale_down.changed + - ecs_service_scale_down.service.desiredCount == 0 + +- name: scale down ECS service again + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 0 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + role: "{{ ecs_service_role_name }}" + register: ecs_service_scale_down + +- name: assert no change + assert: + that: + - not ecs_service_scale_down.changed + - ecs_service_scale_down.service.desiredCount == 0 + +- name: delete ECS service definition + ecs_service: + state: absent + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + wait: true + register: delete_ecs_service + +- name: assert that deleting ECS service worked + assert: + that: + - delete_ecs_service.changed + +- name: delete ECS service definition again + ecs_service: + state: absent + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + register: delete_ecs_service + +- name: assert no change + assert: + that: + - not 
delete_ecs_service.changed + +- name: create VPC-networked task definition with host port set to 0 (expected to fail) + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + state: present + network_mode: awsvpc + register: ecs_task_definition_vpc_no_host_port + ignore_errors: true + +- name: check that awsvpc task definition with host port 0 fails gracefully + assert: + that: + - ecs_task_definition_vpc_no_host_port is failed + - "'error' not in ecs_task_definition_vpc_no_host_port" + +- name: create VPC-networked task definition with host port set to 8080 + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + network_mode: awsvpc + state: present + vars: + ecs_task_host_port: 8080 + register: ecs_task_definition_vpc_with_host_port + +- name: obtain ECS task definition facts + ecs_taskdefinition_info: + task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" + register: ecs_taskdefinition_info + +- name: assert that network mode is awsvpc + assert: + that: + - "ecs_taskdefinition_info.network_mode == 'awsvpc'" + +- name: create ECS service definition with network configuration + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + name: "{{ ecs_service_name }}2" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + register: create_ecs_service_with_vpc + +- name: assert that network configuration is correct + assert: + that: + - "'networkConfiguration' in create_ecs_service_with_vpc.service" + - "'awsvpcConfiguration' in create_ecs_service_with_vpc.service.networkConfiguration" + - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2" + - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1" + +- name: create ecs_service using health_check_grace_period_seconds + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-mft" + cluster: "{{ ecs_cluster_name }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + scheduling_strategy: "REPLICA" + health_check_grace_period_seconds: 30 + desired_count: 1 + state: present + register: ecs_service_creation_hcgp + +- name: health_check_grace_period_seconds sets HealthCheckGracePeriodSeconds + assert: + that: + - ecs_service_creation_hcgp.changed + - "{{ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds}} == 30" + +- name: update ecs_service using health_check_grace_period_seconds + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name
}}-mft" + cluster: "{{ ecs_cluster_name }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + health_check_grace_period_seconds: 10 + state: present + register: ecs_service_creation_hcgp2 + +- name: check that module returns success + assert: + that: + - ecs_service_creation_hcgp2.changed + - "{{ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds}} == 10" + +- name: update ecs_service using REPLICA scheduling_strategy + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-replica" + cluster: "{{ ecs_cluster_name }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + scheduling_strategy: "REPLICA" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + state: present + register: ecs_service_creation_replica + +- name: obtain facts for all ECS services in the cluster + ecs_service_info: + cluster: "{{ ecs_cluster_name }}" + details: true + events: false + register: ecs_service_info + +- name: assert that facts are useful + assert: + that: + - "'services' in ecs_service_info" + - ecs_service_info.services | length > 0 + - "'events' not in ecs_service_info.services[0]" + +- name: obtain facts for existing service in the cluster + ecs_service_info: + cluster: "{{ ecs_cluster_name }}" + service: "{{ ecs_service_name }}" + details: true + events: false + register: ecs_service_info + +- name: assert that existing service is available and running + assert: + that: + - "ecs_service_info.services|length == 1" + - "ecs_service_info.services_not_running|length == 0" + +- name: obtain facts for non-existent service in the cluster + ecs_service_info: + cluster: "{{ ecs_cluster_name }}" + service: madeup + details: true + events: false + register: ecs_service_info + +- name: assert that non-existent service is missing + assert: + that: + - "ecs_service_info.services_not_running[0].reason == 'MISSING'" + +- name: obtain specific ECS service facts + ecs_service_info: + service: "{{ ecs_service_name }}2" + cluster: "{{ ecs_cluster_name }}" + details: true + register: ecs_service_info + +- name: check that facts contain network configuration + assert: + that: + - "'networkConfiguration' in ecs_service_info.services[0]" + +- name: attempt to get facts from missing task definition + ecs_taskdefinition_info: + task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition.taskdefinition.revision + 1}}" + +- name: Create another task definition with placement constraints + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-constraints" + state: present + placement_constraints: "{{ ecs_taskdefinition_placement_constraints }}" + register: ecs_task_definition_constraints + +- name: Check that task definition has been created + assert: + that: + - ecs_task_definition_constraints is changed + - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].type == "{{ ecs_taskdefinition_placement_constraints[0].type }}" + - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].expression == "{{ ecs_taskdefinition_placement_constraints[0].expression }}" + 
+- name: Remove ecs task definition with placement constraints + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + arn: "{{ ecs_task_definition_constraints.taskdefinition.taskDefinitionArn }}" + state: absent + register: ecs_task_definition_constraints_delete + +- name: Check that task definition has been deleted + assert: + that: + - ecs_task_definition_constraints_delete is changed + +- name: Remove ecs task definition with placement constraints again + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + arn: "{{ ecs_task_definition_constraints.taskdefinition.taskDefinitionArn }}" + state: absent + register: ecs_task_definition_constraints_delete + +- name: Assert no change + assert: + that: + - ecs_task_definition_constraints_delete is not changed + +- name: Create ecs_service without load balancer + ecs_service: + name: "{{ ecs_service_name }}-lb" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + scheduling_strategy: "REPLICA" + desired_count: 1 + state: present + register: ecs_service_create_no_load_balancer + +- name: Check ecs_service does not have load balancer + assert: + that: + - ecs_service_create_no_load_balancer.changed + - "ecs_service_create_no_load_balancer.service.loadBalancers | length == 0" + +- name: Update ecs_service load balancer + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-lb" + cluster: "{{ ecs_cluster_name }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + state: present + register: ecs_service_update_load_balancer + +- name: Check ecs_service load balancer updated + assert: + that: + - ecs_service_update_load_balancer.changed + - "ecs_service_update_load_balancer.service.loadBalancers | length == 1" + - "ecs_service_update_load_balancer.service.loadBalancers[0].containerName == ecs_task_name" + - "ecs_service_update_load_balancer.service.loadBalancers[0].containerPort == ecs_task_container_port" + - "ecs_service_update_load_balancer.service.loadBalancers[0].targetGroupArn == elb_target_group_instance.target_group_arn" + +- name: Create ecs service with placement constraints + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-constraint" + cluster: "{{ ecs_cluster_name }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + scheduling_strategy: "REPLICA" + placement_constraints: + - type: distinctInstance + desired_count: 1 + state: present + register: ecs_service_creation_constraints + +- name: Assert ecs service constraint + assert: + that: + - ecs_service_creation_constraints.changed + - "ecs_service_creation_constraints.service.placementConstraints | length == 1" + - "ecs_service_creation_constraints.service.placementConstraints[0].type == 'distinctInstance'" + +- name: > + wait until deployment is completed | + we're facing here multiple issues when testing constraints and later also placement_strategy + + >> "rolloutStateReason": "ECS deployment 
ecs-svc/5156684577543126023 in progress.", + constraints and placement strategies are only changeable if the rollout state is "COMPLETED" + + a) ecs_service currently has no waiter function, so this is a DIY waiter + b) the state never reached "COMPLETED" because something is wrong with the ECS EC2 instances + or the network setup. The EC2 instance never registered as an active instance in the cluster. + + >> no container instance met all of its requirements. Reason: No Container Instances were found in your cluster. + >> For more information, see the Troubleshooting section of the Amazon ECS Developer Guide. + >> ec2_instance networking does not work correctly, no instance available for the cluster + + Because of all this, all following tasks that test the change of a constraint or placement strategy are + using `force_new_deployment: true`. That works around a) and b). + ignore_errors: true + ecs_service_info: + name: "{{ ecs_service_name }}-constraint" + cluster: "{{ ecs_cluster_name }}" + details: true + register: ECS + retries: 10 + delay: 5 + until: "ECS.services[0].deployments[0].rolloutState == 'COMPLETED'" + +- name: Update ecs service's placement constraints + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-constraint" + cluster: "{{ ecs_cluster_name }}" + force_new_deployment: true + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + scheduling_strategy: "REPLICA" + placement_constraints: + - type: memberOf + expression: 'attribute:ecs.instance-type == t3.micro' + desired_count: 1 + state: present + register: ecs_service_update_constraints + +- name: Assert ecs service constraint + assert: + that: + - ecs_service_update_constraints.changed + - "ecs_service_update_constraints.service.placementConstraints | length == 1" + - "ecs_service_update_constraints.service.placementConstraints[0].type == 'memberOf'" + - "ecs_service_update_constraints.service.placementConstraints[0].expression == 'attribute:ecs.instance-type == t3.micro'" + +- name: Remove ecs service's placement constraints + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-constraint" + cluster: "{{ ecs_cluster_name }}" + force_new_deployment: true + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + scheduling_strategy: "REPLICA" + purge_placement_constraints: true + desired_count: 1 + state: present + register: ecs_service_remove_constraints + +- name: Assert ecs service constraint + assert: + that: + - ecs_service_remove_constraints.changed + - "ecs_service_remove_constraints.service.placementConstraints | length == 0" + +- name: Create ecs service with placement strategy + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-strategy" + cluster: "{{ ecs_cluster_name }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name
}}:{{ ecs_task_definition.taskdefinition.revision }}" + scheduling_strategy: "REPLICA" + placement_strategy: + - type: binpack + field: MEMORY + desired_count: 1 + state: present + register: ecs_service_creation_strategy + +- name: Assert ecs service strategy + assert: + that: + - ecs_service_creation_strategy.changed + - "ecs_service_creation_strategy.service.placementStrategy | length == 1" + - "ecs_service_creation_strategy.service.placementStrategy[0].type == 'binpack'" + - "ecs_service_creation_strategy.service.placementStrategy[0].field == 'MEMORY'" + +- name: Update ecs service's placement strategy + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-strategy" + cluster: "{{ ecs_cluster_name }}" + force_new_deployment: true + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + scheduling_strategy: "REPLICA" + placement_strategy: + - type: spread + field: instanceId + desired_count: 1 + state: present + register: ecs_service_update_strategy + +- name: Assert ecs service strategy + assert: + that: + - ecs_service_update_strategy.changed + - "ecs_service_update_strategy.service.placementStrategy | length == 1" + - "ecs_service_update_strategy.service.placementStrategy[0].type == 'spread'" + - "ecs_service_update_strategy.service.placementStrategy[0].field == 'instanceId'" + +- name: Remove ecs service's placement strategy + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-strategy" + cluster: "{{ ecs_cluster_name }}" + force_new_deployment: true + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + scheduling_strategy: "REPLICA" + purge_placement_strategy: true + desired_count: 1 + state: present + register: ecs_service_remove_strategy + +- name: Assert ecs service strategy + assert: + that: + - ecs_service_remove_strategy.changed + - "ecs_service_remove_strategy.service.placementStrategy | length == 0" +# ============================================================ +# Begin tests for Fargate + +- name: ensure AmazonECSTaskExecutionRolePolicy exists + iam_role: + name: "{{ ecs_task_role_name }}" + assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" + description: "Allows ECS containers to make calls to ECR" + state: present + create_instance_profile: false + managed_policy: + - AmazonECSTaskExecutionRolePolicy + wait: True + register: iam_execution_role + +- name: pause for iam availability + ansible.builtin.pause: + seconds: 20 + +- name: create Fargate VPC-networked task definition with host port set to 8080 and unsupported network mode (expected to fail) + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + network_mode: bridge + launch_type: FARGATE + cpu: 512 + memory: 1024 + state: present + vars: + ecs_task_host_port: 8080 + ignore_errors: true + register: ecs_fargate_task_definition_bridged_with_host_port + +- name: check that fargate task definition with bridged networking fails gracefully + assert: + that: + - 
ecs_fargate_task_definition_bridged_with_host_port is failed + - 'ecs_fargate_task_definition_bridged_with_host_port.msg == "To use FARGATE launch type, network_mode must be awsvpc"' + +- name: create Fargate VPC-networked task definition without CPU or Memory (expected to fail) + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + network_mode: awsvpc + launch_type: FARGATE + state: present + ignore_errors: true + register: ecs_fargate_task_definition_vpc_no_mem + +- name: check that fargate task definition without memory or cpu fails gracefully + assert: + that: + - ecs_fargate_task_definition_vpc_no_mem is failed + - 'ecs_fargate_task_definition_vpc_no_mem.msg == "launch_type is FARGATE but all of the following are missing: cpu, memory"' + +- name: create Fargate VPC-networked task definition with CPU or Memory and execution role + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + network_mode: awsvpc + launch_type: FARGATE + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + vars: + ecs_task_host_port: 8080 + register: ecs_fargate_task_definition + +- name: create EC2 VPC-networked task definition with CPU or Memory and execution role + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + network_mode: awsvpc + launch_type: EC2 + cpu: 512 + memory: 1024 + execution_role_arn: "{{ iam_execution_role.arn }}" + state: present + vars: + ecs_task_host_port: 8080 + register: ecs_ec2_task_definition + +- name: check that changing task definition launch type created a new task definition revision + assert: + that: + - ecs_fargate_task_definition.taskdefinition.revision != ecs_ec2_task_definition.taskdefinition.revision + +- name: create fargate ECS service without network config (expected to fail) + ecs_service: + state: present + name: "{{ ecs_service_name }}4" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + launch_type: FARGATE + register: ecs_fargate_service_network_without_awsvpc + ignore_errors: true + +- name: assert that using Fargate ECS service fails + assert: + that: + - ecs_fargate_service_network_without_awsvpc is failed + +- name: create fargate ECS service with network config + ecs_service: + state: present + name: "{{ ecs_service_name }}4" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + launch_type: FARGATE + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + assign_public_ip: true + register: ecs_fargate_service_network_with_awsvpc + +- name: assert that public IP assignment is enabled + assert: + that: + - 'ecs_fargate_service_network_with_awsvpc.service.networkConfiguration.awsvpcConfiguration.assignPublicIp == "ENABLED"' + +### FIX - run tasks are all failing with CannotPullContainerError in AWS +### So using wait: True fails when waiting for tasks to be started +- name: create fargate ECS task with run task + ecs_task: + operation: run + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{
ecs_task_name }}-vpc" + launch_type: FARGATE + count: 1 + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + assign_public_ip: true + started_by: ansible_user + # wait: true + register: fargate_run_task_output + +- name: Assert changed + assert: + that: + - fargate_run_task_output.changed + +# - name: create fargate ECS task with run task again +# ecs_task: +# operation: run +# cluster: "{{ ecs_cluster_name }}" +# task_definition: "{{ ecs_task_name }}-vpc" +# launch_type: FARGATE +# count: 1 +# network_configuration: +# subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" +# security_groups: +# - '{{ setup_sg.group_id }}' +# assign_public_ip: true +# started_by: ansible_user +# register: fargate_run_task_output + +# - name: Assert no change +# assert: +# that: +# - not fargate_run_task_output.changed + +### This does not fail +- name: create fargate ECS task with run task and tags (LF disabled) (should fail) + ecs_task: + operation: run + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc" + launch_type: FARGATE + count: 1 + tags: + tag_key: tag_value + tag_key2: tag_value2 + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + assign_public_ip: true + started_by: ansible_user + register: fargate_run_task_output_with_tags_fail + ignore_errors: true + +# - name: assert that using Fargate ECS service fails +# assert: +# that: +# - fargate_run_task_output_with_tags_fail is failed + +- name: enable taskLongArnFormat + command: aws ecs put-account-setting --name taskLongArnFormat --value enabled + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + +- name: create fargate ECS task with run task and tags + ecs_task: + operation: run + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc" + launch_type: FARGATE + count: 1 + tags: + tag_key: tag_value + tag_key2: tag_value2 + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + assign_public_ip: true + started_by: ansible_user + register: fargate_run_task_output_with_tags + +- name: create fargate ECS task with run task and assign public ip disable + ecs_task: + operation: run + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc" + launch_type: FARGATE + count: 1 + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + assign_public_ip: false + started_by: ansible_user + register: fargate_run_task_output_with_assign_ip + + +# ============================================================ +# End tests for Fargate + +- name: create task definition for absent with arn regression test + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-absent" + state: present + register: ecs_task_definition_absent_with_arn + +- name: absent task definition by arn + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + arn: "{{ ecs_task_definition_absent_with_arn.taskdefinition.taskDefinitionArn }}" + state: absent diff --git 
a/tests/integration/targets/ecs_cluster/tasks/99_terminate_everything.yml b/tests/integration/targets/ecs_cluster/tasks/99_terminate_everything.yml new file mode 100644 index 00000000000..7016f9e70aa --- /dev/null +++ b/tests/integration/targets/ecs_cluster/tasks/99_terminate_everything.yml @@ -0,0 +1,334 @@ +- name: Announce teardown start + debug: + msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****" + +- name: remove setup ec2 instance + ec2_instance: + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + wait: true + ignore_errors: true + +- name: obtain ECS service facts + ecs_service_info: + service: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + details: true + register: ecs_service_info + ignore_errors: true + +- name: scale down ECS service + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_service_info.services[0].taskDefinition }}" + desired_count: 0 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + load_balancers: + - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + ignore_errors: true + register: ecs_service_scale_down + +- name: obtain second ECS service facts + ecs_service_info: + service: "{{ ecs_service_name }}2" + cluster: "{{ ecs_cluster_name }}" + details: true + ignore_errors: true + register: ecs_service_info + +- name: scale down second ECS service + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + name: "{{ ecs_service_name }}2" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_service_info.services[0].taskDefinition }}" + desired_count: 0 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + load_balancers: + - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + ignore_errors: true + register: ecs_service_scale_down + +- name: scale down multifunction-test service + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-mft" + cluster: "{{ ecs_cluster_name }}" + state: present + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 0 + ignore_errors: true + register: ecs_service_scale_down + +- name: scale down scheduling_strategy service + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + name: "{{ ecs_service_name }}-replica" + cluster: "{{ ecs_cluster_name }}" + state: present + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 0 + ignore_errors: true + register: ecs_service_scale_down + +- name: scale down Fargate ECS service + 
vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + ecs_service: + state: present + name: "{{ ecs_service_name }}4" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" + desired_count: 0 + ignore_errors: true + register: ecs_service_scale_down + +- name: stop Fargate ECS tasks + ecs_task: + task: "{{ item.task[0].taskArn }}" + task_definition: "{{ ecs_task_name }}-vpc" + operation: stop + cluster: "{{ ecs_cluster_name }}" + wait: true + ignore_errors: true + with_items: + - "{{ fargate_run_task_output }}" + - "{{ fargate_run_task_output_with_tags }}" + - "{{ fargate_run_task_output_with_assign_ip }}" + - "{{ fargate_run_task_output_with_tags_fail }}" + +- name: remove ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}" + force_deletion: true + wait: true + ignore_errors: true + +- name: remove second ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}2" + force_deletion: true + wait: true + ignore_errors: true + +- name: remove mft ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}-mft" + force_deletion: true + wait: true + ignore_errors: true + +- name: remove constraints ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}-constraint" + force_deletion: true + wait: true + ignore_errors: true + +- name: remove strategy ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}-strategy" + force_deletion: true + wait: true + ignore_errors: true + +- name: remove scheduling_strategy ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}-replica" + force_deletion: true + wait: true + ignore_errors: true + +- name: remove load balancer ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}-lb" + force_deletion: true + wait: true + ignore_errors: true + +- name: remove fargate ECS service + ecs_service: + state: absent + name: "{{ ecs_service_name }}4" + cluster: "{{ ecs_cluster_name }}" + force_deletion: true + wait: true + ignore_errors: true + register: ecs_fargate_service_network_with_awsvpc + +- name: remove ecs task definition + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}" + revision: "{{ ecs_task_definition.taskdefinition.revision }}" + state: absent + vars: + ecs_task_host_port: 8080 + ignore_errors: true + +- name: remove ecs task definition again + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}" + revision: "{{ ecs_task_definition_again.taskdefinition.revision }}" + state: absent + vars: + ecs_task_host_port: 8080 + ignore_errors: true + +- name: remove second ecs task definition + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + revision: "{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" + state: absent + vars: + ecs_task_host_port: 8080 + ignore_errors: true + +- name: remove fargate ecs task definition + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + revision: "{{ ecs_fargate_task_definition.taskdefinition.revision }}" + state: absent + ignore_errors: 
true + +- name: remove ec2 ecs task definition + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + revision: "{{ ecs_ec2_task_definition.taskdefinition.revision }}" + state: absent + ignore_errors: true + +- name: remove ecs task definition for absent with arn + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-absent" + revision: "{{ ecs_task_definition_absent_with_arn.taskdefinition.revision }}" + state: absent + ignore_errors: true + +- name: remove load balancer + elb_application_lb: + name: "{{ ecs_load_balancer_name }}" + state: absent + wait: true + ignore_errors: true + register: elb_application_lb_remove + +- name: remove setup keypair + ec2_key: + name: '{{ resource_prefix }}_ecs_cluster' + state: absent + ignore_errors: true + +- name: remove ECS cluster + with_items: + - "{{ ecs_cluster_name }}" + - "{{ ecs_cluster_name }}-cps" + ecs_cluster: + name: "{{ item }}" + state: absent + ignore_errors: true + register: this_deletion + +- name: remove security groups + ec2_group: + name: '{{ item }}' + description: 'created by Ansible integration tests' + state: absent + vpc_id: '{{ setup_vpc.vpc.id }}' + with_items: + - '{{ resource_prefix }}_ecs_cluster-sg' + ignore_errors: true + register: this_deletion + retries: 10 + delay: 10 + until: this_deletion is not failed + +- name: remove target groups + elb_target_group: + name: "{{ item }}" + state: absent + with_items: + - "{{ ecs_target_group_name }}1" + - "{{ ecs_target_group_name }}2" + ignore_errors: true + +- name: remove IGW + ec2_vpc_igw: + state: absent + vpc_id: '{{ setup_vpc.vpc.id }}' + ignore_errors: true + +- name: remove setup subnet + ec2_vpc_subnet: + az: '{{ aws_region }}{{ item.zone }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: "{{ item.cidr}}" + state: absent + with_items: + - zone: a + cidr: 10.0.1.0/24 + - zone: b + cidr: 10.0.2.0/24 + ignore_errors: true + +- name: remove setup VPC + ec2_vpc_net: + cidr_block: 10.0.0.0/16 + state: absent + name: '{{ resource_prefix }}_ecs_cluster' + ignore_errors: true + +- name: Delete IAM service role + iam_role: + name: '{{ ecs_service_role_name }}' + state: absent + delete_instance_profile: True + ignore_errors: true + +- name: Delete IAM task execution role + iam_role: + name: '{{ ecs_task_role_name }}' + state: absent + delete_instance_profile: True + ignore_errors: true diff --git a/tests/integration/targets/ecs_cluster/tasks/main.yml b/tests/integration/targets/ecs_cluster/tasks/main.yml index a86ecdde3a8..1d27cdc7335 100644 --- a/tests/integration/targets/ecs_cluster/tasks/main.yml +++ b/tests/integration/targets/ecs_cluster/tasks/main.yml @@ -10,1356 +10,9 @@ region: '{{ aws_region }}' block: - - name: ensure IAM instance role exists - iam_role: - name: ecsInstanceRole - assume_role_policy_document: "{{ lookup('file','ec2-trust-policy.json') }}" - state: present - create_instance_profile: yes - managed_policy: - - AmazonEC2ContainerServiceforEC2Role - - - name: ensure IAM service role exists - iam_role: - name: ecsServiceRole - assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" - state: present - create_instance_profile: no - managed_policy: - - AmazonEC2ContainerServiceRole - - - name: ensure AWSServiceRoleForECS role exists - iam_role_info: - name: AWSServiceRoleForECS - register: iam_role_result - - # FIXME: come up with a way to automate this - - name: fail if AWSServiceRoleForECS role does not exist - fail: - msg: > - Run `aws iam 
create-service-linked-role --aws-service-name=ecs.amazonaws.com ` to create - a linked role for AWS VPC load balancer management - when: not iam_role_result.iam_roles - - - name: create an ECS cluster - ecs_cluster: - name: "{{ ecs_cluster_name }}" - state: present - register: ecs_cluster - - - name: check that ecs_cluster changed - assert: - that: - - ecs_cluster.changed - - - name: create same ECS cluster (should do nothing) - ecs_cluster: - name: "{{ ecs_cluster_name }}" - state: present - register: ecs_cluster_again - - - name: check that ecs_cluster did not change - assert: - that: - - not ecs_cluster_again.changed - - - name: add capacity providers and strategy - ecs_cluster: - name: "{{ ecs_cluster_name }}" - state: present - purge_capacity_providers: True - capacity_providers: - - FARGATE - - FARGATE_SPOT - capacity_provider_strategy: - - capacity_provider: FARGATE - base: 1 - weight: 1 - - capacity_provider: FARGATE_SPOT - weight: 100 - register: ecs_cluster_update - - - name: check that ecs_cluster was correctly updated - assert: - that: - - ecs_cluster_update.changed - - ecs_cluster_update.cluster is defined - - ecs_cluster_update.cluster.capacityProviders is defined - - "'FARGATE' in ecs_cluster_update.cluster.capacityProviders" - - - name: create a VPC to work in - ec2_vpc_net: - cidr_block: 10.0.0.0/16 - state: present - name: '{{ resource_prefix }}_ecs_cluster' - resource_tags: - Name: '{{ resource_prefix }}_ecs_cluster' - register: setup_vpc - - - name: create a key pair to use for creating an ec2 instance - ec2_key: - name: '{{ resource_prefix }}_ecs_cluster' - state: present - when: ec2_keypair is not defined # allow override in cloud-config-aws.ini - register: setup_key - - - name: create subnets - ec2_vpc_subnet: - az: '{{ aws_region }}{{ item.zone }}' - tags: - Name: '{{ resource_prefix }}_ecs_cluster-subnet-{{ item.zone }}' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: "{{ item.cidr }}" - state: present - register: setup_subnet - with_items: - - zone: a - cidr: 10.0.1.0/24 - - zone: b - cidr: 10.0.2.0/24 - - - name: create an internet gateway so that ECS agents can talk to ECS - ec2_vpc_igw: - vpc_id: '{{ setup_vpc.vpc.id }}' - state: present - register: igw - - - name: create a security group to use for creating an ec2 instance - ec2_group: - name: '{{ resource_prefix }}_ecs_cluster-sg' - description: 'created by Ansible integration tests' - state: present - vpc_id: '{{ setup_vpc.vpc.id }}' - rules: # allow all ssh traffic but nothing else - - ports: 22 - cidr_ip: 0.0.0.0/0 - register: setup_sg - - - set_fact: - # As a lookup plugin we don't have access to module_defaults - connection_args: - region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - aws_security_token: "{{ security_token | default(omit) }}" - no_log: True - - - name: set image id fact - set_fact: - ecs_image_id: "{{ lookup('aws_ssm', '/aws/service/ecs/optimized-ami/amazon-linux-2/recommended/image_id', **connection_args) }}" - - - name: provision ec2 instance to create an image - ec2_instance: - key_name: '{{ ec2_keypair|default(setup_key.key.name) }}' - instance_type: t3.micro - state: present - image_id: '{{ ecs_image_id }}' - wait: yes - user_data: "{{ user_data }}" - instance_role: ecsInstanceRole - tags: - Name: '{{ resource_prefix }}_ecs_agent' - security_group: '{{ setup_sg.group_id }}' - vpc_subnet_id: '{{ setup_subnet.results[0].subnet.id }}' - register: setup_instance - - - name: create target group - elb_target_group: - name: "{{ 
ecs_target_group_name }}1" - state: present - protocol: HTTP - port: 8080 - modify_targets: no - vpc_id: '{{ setup_vpc.vpc.id }}' - target_type: instance - health_check_interval: 5 - health_check_timeout: 2 - healthy_threshold_count: 2 - unhealthy_threshold_count: 2 - register: elb_target_group_instance - - - name: create second target group to use ip target_type - elb_target_group: - name: "{{ ecs_target_group_name }}2" - state: present - protocol: HTTP - port: 8080 - modify_targets: no - vpc_id: '{{ setup_vpc.vpc.id }}' - target_type: ip - health_check_interval: 5 - health_check_timeout: 2 - healthy_threshold_count: 2 - unhealthy_threshold_count: 2 - register: elb_target_group_ip - - - name: create load balancer - elb_application_lb: - name: "{{ ecs_load_balancer_name }}" - state: present - scheme: internal - security_groups: '{{ setup_sg.group_id }}' - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ ecs_target_group_name }}1" - - Protocol: HTTP - Port: 81 - DefaultActions: - - Type: forward - TargetGroupName: "{{ ecs_target_group_name }}2" - - - name: create task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - state: present - register: ecs_task_definition - - - name: check that initial task definition changes - assert: - that: - - ecs_task_definition.changed - - - name: recreate task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - state: present - register: ecs_task_definition_again - - - name: check that task definition does not change - assert: - that: - - not ecs_task_definition_again.changed - - - name: obtain ECS task definition facts - ecs_taskdefinition_info: - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - - - name: create ECS service definition - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - placement_constraints: - - type: distinctInstance - health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - role: "ecsServiceRole" - register: ecs_service - - - name: check that ECS service creation changed - assert: - that: - - ecs_service.changed - - - name: check that placement constraint has been applied - assert: - that: - - "ecs_service.service.placementConstraints[0].type == 'distinctInstance'" - - - name: check that ECS service was created with deployment_circuit_breaker - assert: - that: - - ecs_service.service.deploymentConfiguration.deploymentCircuitBreaker.enable - - ecs_service.service.deploymentConfiguration.deploymentCircuitBreaker.rollback - - - name: create same ECS service definition (should not change) - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - 
placement_strategy: "{{ ecs_service_placement_strategy }}" - placement_constraints: - - type: distinctInstance - health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - role: "ecsServiceRole" - register: ecs_service_again - - - name: check that ECS service recreation changed nothing - assert: - that: - - not ecs_service_again.changed - - - name: create same ECS service definition via force_new_deployment - ecs_service: - state: present - force_new_deployment: yes - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - placement_constraints: - - type: distinctInstance - health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - role: "ecsServiceRole" - register: ecs_service_again - - - name: check that ECS service recreation changed again due force_new_deployment - assert: - that: - - ecs_service_again.changed - - - name: attempt to use ECS network configuration on task definition without awsvpc network_mode (expected to fail) - ecs_service: - state: present - name: "{{ ecs_service_name }}3" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - register: ecs_service_network_without_awsvpc_task - ignore_errors: yes - - - name: assert that using ECS network configuration with non AWSVPC task definition fails - assert: - that: - - ecs_service_network_without_awsvpc_task is failed - - - name: scale down ECS service - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 0 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - role: "ecsServiceRole" - wait: true - register: ecs_service_scale_down - - - name: assert that ECS service is scaled down - assert: - that: - - ecs_service_scale_down.changed - - ecs_service_scale_down.service.desiredCount == 0 - - - name: scale down ECS service again - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 0 
- deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - role: "ecsServiceRole" - register: ecs_service_scale_down - - - name: assert no change - assert: - that: - - not ecs_service_scale_down.changed - - ecs_service_scale_down.service.desiredCount == 0 - - - name: delete ECS service definition - ecs_service: - state: absent - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - wait: yes - register: delete_ecs_service - - - name: assert that deleting ECS service worked - assert: - that: - - delete_ecs_service.changed - - - name: delete ECS service definition again - ecs_service: - state: absent - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - register: delete_ecs_service - - - name: assert no change - assert: - that: - - not delete_ecs_service.changed - - - name: create VPC-networked task definition with host port set to 0 (expected to fail) - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - state: present - network_mode: awsvpc - register: ecs_task_definition_vpc_no_host_port - ignore_errors: yes - - - name: check that awsvpc task definition with host port 0 fails gracefully - assert: - that: - - ecs_task_definition_vpc_no_host_port is failed - - "'error' not in ecs_task_definition_vpc_no_host_port" - - - name: create VPC-networked task definition with host port set to 8080 - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - network_mode: awsvpc - state: present - vars: - ecs_task_host_port: 8080 - register: ecs_task_definition_vpc_with_host_port - - - name: obtain ECS task definition facts - ecs_taskdefinition_info: - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" - register: ecs_taskdefinition_info - - - name: assert that network mode is awsvpc - assert: - that: - - "ecs_taskdefinition_info.network_mode == 'awsvpc'" - - - name: create ECS service definition with network configuration - ecs_service: - state: present - name: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - register: create_ecs_service_with_vpc - - - name: assert that network configuration is correct - assert: - that: - - "'networkConfiguration' in create_ecs_service_with_vpc.service" - - "'awsvpcConfiguration' in create_ecs_service_with_vpc.service.networkConfiguration" - - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2" - - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1" - - - name: create ecs_service using health_check_grace_period_seconds - ecs_service: - name: "{{ 
ecs_service_name }}-mft" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - scheduling_strategy: "REPLICA" - health_check_grace_period_seconds: 30 - desired_count: 1 - state: present - register: ecs_service_creation_hcgp - - - name: health_check_grace_period_seconds sets HealthChecGracePeriodSeconds - assert: - that: - - ecs_service_creation_hcgp.changed - - "{{ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds}} == 30" - - - name: update ecs_service using health_check_grace_period_seconds - ecs_service: - name: "{{ ecs_service_name }}-mft" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - health_check_grace_period_seconds: 10 - state: present - register: ecs_service_creation_hcgp2 - - - name: check that module returns success - assert: - that: - - ecs_service_creation_hcgp2.changed - - "{{ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds}} == 10" - - - name: update ecs_service using REPLICA scheduling_strategy - ecs_service: - name: "{{ ecs_service_name }}-replica" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - scheduling_strategy: "REPLICA" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - state: present - register: ecs_service_creation_replica - - - name: obtain facts for all ECS services in the cluster - ecs_service_info: - cluster: "{{ ecs_cluster_name }}" - details: yes - events: no - register: ecs_service_info - - - name: assert that facts are useful - assert: - that: - - "'services' in ecs_service_info" - - ecs_service_info.services | length > 0 - - "'events' not in ecs_service_info.services[0]" - - - name: obtain facts for existing service in the cluster - ecs_service_info: - cluster: "{{ ecs_cluster_name }}" - service: "{{ ecs_service_name }}" - details: yes - events: no - register: ecs_service_info - - - name: assert that existing service is available and running - assert: - that: - - "ecs_service_info.services|length == 1" - - "ecs_service_info.services_not_running|length == 0" - - - name: obtain facts for non-existent service in the cluster - ecs_service_info: - cluster: "{{ ecs_cluster_name }}" - service: madeup - details: yes - events: no - register: ecs_service_info - - - name: assert that non-existent service is missing - assert: - that: - - "ecs_service_info.services_not_running[0].reason == 'MISSING'" - - - name: obtain specific ECS service facts - ecs_service_info: - service: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - details: yes - register: ecs_service_info - - - name: check that facts contain network configuration - assert: - that: - - "'networkConfiguration' in ecs_service_info.services[0]" - - - name: attempt to get facts from missing task definition - ecs_taskdefinition_info: - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition.taskdefinition.revision + 
1}}" - - - name: Create another task definition with placement constraints - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-constraints" - state: present - placement_constraints: "{{ ecs_taskdefinition_placement_constraints }}" - register: ecs_task_definition_constraints - - - name: Check that task definition has been created - assert: - that: - - ecs_task_definition_constraints is changed - - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].type == "{{ ecs_taskdefinition_placement_constraints[0].type }}" - - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].expression == "{{ ecs_taskdefinition_placement_constraints[0].expression }}" - - - name: Remove ecs task definition with placement constraints - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - arn: "{{ ecs_task_definition_constraints.taskdefinition.taskDefinitionArn }}" - state: absent - register: ecs_task_definition_constraints_delete - - - name: Check that task definition has been deleted - assert: - that: - - ecs_task_definition_constraints_delete is changed - - - name: Remove ecs task definition with placement constraints again - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - arn: "{{ ecs_task_definition_constraints.taskdefinition.taskDefinitionArn }}" - state: absent - register: ecs_task_definition_constraints_delete - - - name: Assert no change - assert: - that: - - ecs_task_definition_constraints_delete is not changed - - - name: Create ecs_service without load balancer - ecs_service: - name: "{{ ecs_service_name }}-lb" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - scheduling_strategy: "REPLICA" - desired_count: 1 - state: present - register: ecs_service_create_no_load_balancer - - - name: Check ecs_service does not have load balancer - assert: - that: - - ecs_service_create_no_load_balancer.changed - - "ecs_service_create_no_load_balancer.service.loadBalancers | length == 0" - - - name: Update ecs_service load balancer - ecs_service: - name: "{{ ecs_service_name }}-lb" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - state: present - register: ecs_service_update_load_balancer - - - name: Check ecs_service load balancer updated - assert: - that: - - ecs_service_update_load_balancer.changed - - "ecs_service_update_load_balancer.service.loadBalancers | length == 1" - - "ecs_service_update_load_balancer.service.loadBalancers[0].containerName == {{ ecs_task_name }}" - - "ecs_service_update_load_balancer.service.loadBalancers[0].containerPort == {{ ecs_task_container_port }}" - - "ecs_service_update_load_balancer.service.loadBalancers[0].loadBalancerName == {{ ecs_load_balancer_name }}" - - - name: Create ecs service with placement constraints - ecs_service: - name: "{{ ecs_service_name }}-constraint" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - scheduling_strategy: "REPLICA" - placement_constraints: - - type: 
distinctInstance - desired_count: 1 - state: present - register: ecs_service_creation_constraints - - - name: Assert ecs service constraint - assert: - that: - - ecs_service_creation_constraints.changed - - "ecs_service_creation_constraints.service.placementConstraints | length == 1" - - "ecs_service_creation_constraints.service.placementConstraints[0].type == distinctInstance" - - - name: Update ecs service's placement constraints - ecs_service: - name: "{{ ecs_service_name }}-constraint" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - scheduling_strategy: "REPLICA" - placement_constraints: - - type: memberOf - expression: 'attribute:ecs.instance-type == t3.micro' - desired_count: 1 - state: present - register: ecs_service_update_constraints - - - name: Assert ecs service constraint - assert: - that: - - ecs_service_update_constraints.changed - - "ecs_service_update_constraints.service.placementConstraints | length == 1" - - "ecs_service_update_constraints.service.placementConstraints[0].type == memberOf" - - "ecs_service_update_constraints.service.placementConstraints[0].expression == 'attribute:ecs.instance-type == t3.micro'" - - - name: Remove ecs service's placement constraints - ecs_service: - name: "{{ ecs_service_name }}-constraint" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - scheduling_strategy: "REPLICA" - desired_count: 1 - state: present - register: ecs_service_remove_constraints - - - name: Assert ecs service constraint - assert: - that: - - ecs_service_remove_constraints.changed - - "ecs_service_remove_constraints.service.placementConstraints | length == 0" - - - name: Create ecs service with placement strategy - ecs_service: - name: "{{ ecs_service_name }}-strategy" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - scheduling_strategy: "REPLICA" - placement_strategy: - - type: binpack - field: MEMORY - desired_count: 1 - state: present - register: ecs_service_creation_strategy - - - name: Assert ecs service strategy - assert: - that: - - ecs_service_creation_strategy.changed - - "ecs_service_creation_strategy.service.placementStrategy | length == 1" - - "ecs_service_creation_strategy.service.placementStrategy[0].type == binpack" - - "ecs_service_creation_strategy.service.placementStrategy[0].field == MEMORY" - - - name: Update ecs service's placement strategy - ecs_service: - name: "{{ ecs_service_name }}-strategy" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - scheduling_strategy: "REPLICA" - placement_strategy: - - type: spread - field: instanceId - 
desired_count: 1 - state: present - register: ecs_service_update_strategy - - - name: Assert ecs service strategy - assert: - that: - - ecs_service_update_strategy.changed - - "ecs_service_update_strategy.service.placementStrategy | length == 1" - - "ecs_service_update_strategy.service.placementStrategy[0].type == spread" - - "ecs_service_update_strategy.service.placementStrategy[0].field == instanceId" - - - name: Remove ecs service's placement strategy - ecs_service: - name: "{{ ecs_service_name }}-strategy" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - scheduling_strategy: "REPLICA" - desired_count: 1 - state: present - register: ecs_service_remove_strategy - - - name: Assert ecs service strategy - assert: - that: - - ecs_service_remove_strategy.changed - - "ecs_service_remove_strategy.service.placementStrategy | length == 0" - # ============================================================ - # Begin tests for Fargate - - - name: ensure AmazonECSTaskExecutionRolePolicy exists - iam_role: - name: ecsTaskExecutionRole - assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" - description: "Allows ECS containers to make calls to ECR" - state: present - create_instance_profile: no - managed_policy: - - AmazonEC2ContainerServiceRole - register: iam_execution_role - - - name: create Fargate VPC-networked task definition with host port set to 8080 and unsupported network mode (expected to fail) - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - network_mode: bridge - launch_type: FARGATE - cpu: 512 - memory: 1024 - state: present - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - register: ecs_fargate_task_definition_bridged_with_host_port - - - name: check that fargate task definition with bridged networking fails gracefully - assert: - that: - - ecs_fargate_task_definition_bridged_with_host_port is failed - - 'ecs_fargate_task_definition_bridged_with_host_port.msg == "To use FARGATE launch type, network_mode must be awsvpc"' - - - name: create Fargate VPC-networked task definition without CPU or Memory (expected to Fail) - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - network_mode: awsvpc - launch_type: FARGATE - state: present - ignore_errors: yes - register: ecs_fargate_task_definition_vpc_no_mem - - - name: check that fargate task definition without memory or cpu fails gracefully - assert: - that: - - ecs_fargate_task_definition_vpc_no_mem is failed - - 'ecs_fargate_task_definition_vpc_no_mem.msg == "launch_type is FARGATE but all of the following are missing: cpu, memory"' - - - name: create Fargate VPC-networked task definition with CPU or Memory and execution role - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - network_mode: awsvpc - launch_type: FARGATE - cpu: 512 - memory: 1024 - execution_role_arn: "{{ iam_execution_role.arn }}" - state: present - vars: - ecs_task_host_port: 8080 - register: ecs_fargate_task_definition - - - name: create EC2 VPC-networked task definition with CPU or Memory and execution role - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - 
network_mode: awsvpc - launch_type: EC2 - cpu: 512 - memory: 1024 - execution_role_arn: "{{ iam_execution_role.arn }}" - state: present - vars: - ecs_task_host_port: 8080 - register: ecs_ec2_task_definition - - - name: check that changing task definiton launch type created a new task definition revision - assert: - that: - - ecs_fargate_task_definition.taskdefinition.revision != ecs_ec2_task_definition.taskdefinition.revision - - - name: create fargate ECS service without network config (expected to fail) - ecs_service: - state: present - name: "{{ ecs_service_name }}4" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - launch_type: FARGATE - register: ecs_fargate_service_network_without_awsvpc - ignore_errors: yes - - - name: assert that using Fargate ECS service fails - assert: - that: - - ecs_fargate_service_network_without_awsvpc is failed - - - name: create fargate ECS service with network config - ecs_service: - state: present - name: "{{ ecs_service_name }}4" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - launch_type: FARGATE - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: true - register: ecs_fargate_service_network_with_awsvpc - - - name: assert that public IP assignment is enabled - assert: - that: - - 'ecs_fargate_service_network_with_awsvpc.service.networkConfiguration.awsvpcConfiguration.assignPublicIp == "ENABLED"' - - ### FIX - run tasks are all failing with CannotPullContainerError in AWS - ### So using wait: True fails when waiting for tasks to be started - - name: create fargate ECS task with run task - ecs_task: - operation: run - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc" - launch_type: FARGATE - count: 1 - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: true - started_by: ansible_user - # wait: yes - register: fargate_run_task_output - - - name: Assert changed - assert: - that: - - fargate_run_task_output.changed - - # - name: create fargate ECS task with run task again - # ecs_task: - # operation: run - # cluster: "{{ ecs_cluster_name }}" - # task_definition: "{{ ecs_task_name }}-vpc" - # launch_type: FARGATE - # count: 1 - # network_configuration: - # subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - # security_groups: - # - '{{ setup_sg.group_id }}' - # assign_public_ip: true - # started_by: ansible_user - # register: fargate_run_task_output - - # - name: Assert no change - # assert: - # that: - # - not fargate_run_task_output.changed - - ### This does not fail - - name: create fargate ECS task with run task and tags (LF disabled) (should fail) - ecs_task: - operation: run - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc" - launch_type: FARGATE - count: 1 - tags: - tag_key: tag_value - tag_key2: tag_value2 - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: 
true - started_by: ansible_user - register: fargate_run_task_output_with_tags_fail - ignore_errors: yes - - # - name: assert that using Fargate ECS service fails - # assert: - # that: - # - fargate_run_task_output_with_tags_fail is failed - - - name: enable taskLongArnFormat - command: aws ecs put-account-setting --name taskLongArnFormat --value enabled - environment: - AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" - AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: "{{ aws_region }}" - - - name: create fargate ECS task with run task and tags - ecs_task: - operation: run - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc" - launch_type: FARGATE - count: 1 - tags: - tag_key: tag_value - tag_key2: tag_value2 - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: true - started_by: ansible_user - register: fargate_run_task_output_with_tags - - - name: create fargate ECS task with run task and assign public ip disable - ecs_task: - operation: run - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc" - launch_type: FARGATE - count: 1 - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: false - started_by: ansible_user - register: fargate_run_task_output_with_assign_ip - - - # ============================================================ - # End tests for Fargate - - - name: create task definition for absent with arn regression test - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-absent" - state: present - register: ecs_task_definition_absent_with_arn - - - name: absent task definition by arn - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - arn: "{{ ecs_task_definition_absent_with_arn.taskdefinition.taskDefinitionArn }}" - state: absent + - include: 01_create_requirements.yml + - include: 10_ecs_cluster.yml + - include: 20_ecs_service.yml always: - # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc - - name: Announce teardown start - debug: - msg: "***** TESTING COMPLETE. 
COMMENCE TEARDOWN *****" - - - name: remove setup ec2 instance - ec2_instance: - instance_ids: '{{ setup_instance.instance_ids }}' - state: absent - wait: yes - ignore_errors: yes - - - name: obtain ECS service facts - ecs_service_info: - service: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - details: yes - register: ecs_service_info - - - name: scale down ECS service - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_service_info.services[0].taskDefinition }}" - desired_count: 0 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - ignore_errors: yes - register: ecs_service_scale_down - - - name: obtain second ECS service facts - ecs_service_info: - service: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - details: yes - ignore_errors: yes - register: ecs_service_info - - - name: scale down second ECS service - ecs_service: - state: present - name: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_service_info.services[0].taskDefinition }}" - desired_count: 0 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - ignore_errors: yes - register: ecs_service_scale_down - - - name: scale down multifunction-test service - ecs_service: - name: "{{ ecs_service_name }}-mft" - cluster: "{{ ecs_cluster_name }}" - state: present - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 0 - ignore_errors: yes - register: ecs_service_scale_down - - - name: scale down scheduling_strategy service - ecs_service: - name: "{{ ecs_service_name }}-replica" - cluster: "{{ ecs_cluster_name }}" - state: present - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 0 - ignore_errors: yes - register: ecs_service_scale_down - - - name: scale down Fargate ECS service - ecs_service: - state: present - name: "{{ ecs_service_name }}4" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" - desired_count: 0 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - ignore_errors: yes - register: ecs_service_scale_down - - - name: stop Fargate ECS tasks - ecs_task: - task: "{{ item.task[0].taskArn }}" - task_definition: "{{ ecs_task_name }}-vpc" - operation: stop - cluster: "{{ ecs_cluster_name }}" - wait: yes - ignore_errors: yes - with_items: - - "{{ fargate_run_task_output }}" - - "{{ fargate_run_task_output_with_tags }}" - - "{{ fargate_run_task_output_with_assign_ip }}" - - "{{ 
fargate_run_task_output_with_tags_fail }}" - - - name: remove ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}" - force_deletion: yes - wait: yes - ignore_errors: yes - - - name: remove second ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}2" - force_deletion: yes - wait: yes - ignore_errors: yes - - - name: remove mft ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}-mft" - force_deletion: yes - wait: yes - ignore_errors: yes - - - name: remove constraints ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}-constraint" - force_deletion: yes - wait: yes - ignore_errors: yes - - - name: remove strategy ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}-strategy" - force_deletion: yes - wait: yes - ignore_errors: yes - - - name: remove scheduling_strategy ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}-replica" - force_deletion: yes - wait: yes - ignore_errors: yes - - - name: remove load balancer ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}-lb" - force_deletion: yes - wait: yes - ignore_errors: yes - - - name: remove fargate ECS service - ecs_service: - state: absent - name: "{{ ecs_service_name }}4" - cluster: "{{ ecs_cluster_name }}" - force_deletion: yes - wait: yes - ignore_errors: yes - register: ecs_fargate_service_network_with_awsvpc - - - name: remove ecs task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - revision: "{{ ecs_task_definition.taskdefinition.revision }}" - state: absent - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - - - name: remove ecs task definition again - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - revision: "{{ ecs_task_definition_again.taskdefinition.revision }}" - state: absent - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - - - name: remove second ecs task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - revision: "{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" - state: absent - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - - - name: remove fargate ecs task definition - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - revision: "{{ ecs_fargate_task_definition.taskdefinition.revision }}" - state: absent - ignore_errors: yes - - - name: remove ec2 ecs task definition - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - revision: "{{ ecs_ec2_task_definition.taskdefinition.revision }}" - state: absent - ignore_errors: yes - - - name: remove ecs task definition for absent with arn - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-absent" - revision: "{{ ecs_task_definition_absent_with_arn.taskdefinition.revision }}" - state: absent - ignore_errors: yes - - - name: remove load balancer - elb_application_lb: - name: "{{ ecs_load_balancer_name }}" - state: absent - wait: yes - ignore_errors: yes - register: elb_application_lb_remove - - - name: remove setup 
keypair
-      ec2_key:
-        name: '{{ resource_prefix }}_ecs_cluster'
-        state: absent
-      ignore_errors: yes
-
-    - name: remove ECS cluster
-      ecs_cluster:
-        name: "{{ ecs_cluster_name }}"
-        state: absent
-      ignore_errors: yes
-      register: this_deletion
-
-    - name: remove security groups
-      ec2_group:
-        name: '{{ item }}'
-        description: 'created by Ansible integration tests'
-        state: absent
-        vpc_id: '{{ setup_vpc.vpc.id }}'
-      with_items:
-        - '{{ resource_prefix }}_ecs_cluster-sg'
-      ignore_errors: yes
-      register: this_deletion
-      retries: 10
-      delay: 10
-      until: this_deletion is not failed
-
-    - name: remove target groups
-      elb_target_group:
-        name: "{{ item }}"
-        state: absent
-      with_items:
-        - "{{ ecs_target_group_name }}1"
-        - "{{ ecs_target_group_name }}2"
-      ignore_errors: yes
-
-    - name: remove IGW
-      ec2_vpc_igw:
-        state: absent
-        vpc_id: '{{ setup_vpc.vpc.id }}'
-      ignore_errors: yes
-
-    - name: remove setup subnet
-      ec2_vpc_subnet:
-        az: '{{ aws_region }}{{ item.zone }}'
-        vpc_id: '{{ setup_vpc.vpc.id }}'
-        cidr: "{{ item.cidr}}"
-        state: absent
-      with_items:
-        - zone: a
-          cidr: 10.0.1.0/24
-        - zone: b
-          cidr: 10.0.2.0/24
-      ignore_errors: yes
-
-    - name: remove setup VPC
-      ec2_vpc_net:
-        cidr_block: 10.0.0.0/16
-        state: absent
-        name: '{{ resource_prefix }}_ecs_cluster'
-      ignore_errors: yes
+    - include: 99_terminate_everything.yml