From 35f4f5921f39fe85d1af0fb1f96e6084678c31b4 Mon Sep 17 00:00:00 2001
From: Markus Bergholz
Date: Mon, 23 May 2022 20:02:01 +0200
Subject: [PATCH] fix ecs_cluster integration test (#1145)

fix ecs_cluster integration test

SUMMARY
ecs_cluster: make the ecs_cluster integration test work again - these are bugs I hit that had to be fixed to finish this work
ecs_taskdefinition: fix change detection when the launch_type parameter changes
ecs_service: the comparison of task_definition never worked and always resulted in a changed task
change detection of health_check_grace_period_seconds was never implemented; it is exercised by the tests and started failing once task_definition is compared correctly

ref: #1142
ISSUE TYPE
Bugfix Pull Request
COMPONENT NAME
ecs_taskdefinition
ecs_service
ADDITIONAL INFORMATION
The existing test tasks are essentially unchanged: they were only reordered, and the tasks marked as FIXME were removed because what they attempt is simply not possible (changing the network settings of an already created service).

Reviewed-by: Alina Buzachis
Reviewed-by: Joseph Torcasso
Reviewed-by: Mark Chappell
Reviewed-by: Markus Bergholz
---
 .../fragments/0000-ecs_taskdefinition_fix.yml |    4 +
 plugins/modules/ecs_service.py                |    9 +-
 plugins/modules/ecs_taskdefinition.py         |    2 +-
 .../tasks/force_service_deletion.yml          |  766 -----------
 .../targets/ecs_cluster/tasks/full_test.yml   | 1139 -----------------
 .../targets/ecs_cluster/tasks/main.yml        | 1031 ++++++++++++++-
 .../tasks/network_force_new_deployment.yml    |  107 --
 7 files changed, 1041 insertions(+), 2017 deletions(-)
 create mode 100644 changelogs/fragments/0000-ecs_taskdefinition_fix.yml
 delete mode 100644 tests/integration/targets/ecs_cluster/tasks/force_service_deletion.yml
 delete mode 100644 tests/integration/targets/ecs_cluster/tasks/full_test.yml
 delete mode 100644 tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml

diff --git a/changelogs/fragments/0000-ecs_taskdefinition_fix.yml b/changelogs/fragments/0000-ecs_taskdefinition_fix.yml
new file mode 100644
index 00000000000..0f11ebd2f0b
--- /dev/null
+++ b/changelogs/fragments/0000-ecs_taskdefinition_fix.yml
@@ -0,0 +1,4 @@
+bugfixes:
+  - ecs_taskdefinition - fix broken change detection of the ``launch_type`` parameter (https://github.com/ansible-collections/community.aws/pull/1145).
+  - ecs_service - fix broken comparison of ``task_definition`` that always resulted in a changed task (https://github.com/ansible-collections/community.aws/pull/1145).
+  - ecs_service - add missing change detection of the ``health_check_grace_period_seconds`` parameter (https://github.com/ansible-collections/community.aws/pull/1145).
\ No newline at end of file
diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py
index 8e7adbcacc2..66f20b63d81 100644
--- a/plugins/modules/ecs_service.py
+++ b/plugins/modules/ecs_service.py
@@ -549,7 +549,14 @@ def describe_service(self, cluster_name, service_name):
         raise Exception("Unknown problem describing service %s."
% service_name) def is_matching_service(self, expected, existing): - if expected['task_definition'] != existing['taskDefinition']: + # aws returns the arn of the task definition + # arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3 + # but the user is just entering + # ansible-fargate-nginx:3 + if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]: + return False + + if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'): return False if (expected['load_balancers'] or []) != existing['loadBalancers']: diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py index 34574aae4ed..1c0c863750d 100644 --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -956,7 +956,7 @@ def _task_definition_matches(requested_volumes, requested_containers, requested_ if requested_task_role_arn != td.get('taskRoleArn', ""): return None - if requested_launch_type is not None and requested_launch_type not in td.get('compatibilities', []): + if requested_launch_type is not None and requested_launch_type not in td.get('requiresCompatibilities', []): return None existing_volumes = td.get('volumes', []) or [] diff --git a/tests/integration/targets/ecs_cluster/tasks/force_service_deletion.yml b/tests/integration/targets/ecs_cluster/tasks/force_service_deletion.yml deleted file mode 100644 index 058dc3173f0..00000000000 --- a/tests/integration/targets/ecs_cluster/tasks/force_service_deletion.yml +++ /dev/null @@ -1,766 +0,0 @@ ---- -# tasks file for ecs_cluster -- name: ecs_cluster tests - collections: - - amazon.aws - - block: - - name: ensure IAM instance role exists - iam_role: - name: ecsInstanceRole - assume_role_policy_document: "{{ lookup('file','ec2-trust-policy.json') }}" - state: present - create_instance_profile: yes - managed_policy: - - AmazonEC2ContainerServiceforEC2Role - - - name: ensure IAM service role exists - iam_role: - name: ecsServiceRole - assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" - state: present - create_instance_profile: no - managed_policy: - - AmazonEC2ContainerServiceRole - - - name: ensure AWSServiceRoleForECS role exists - iam_role_info: - name: AWSServiceRoleForECS - register: iam_role_result - - # FIXME: come up with a way to automate this - - name: fail if AWSServiceRoleForECS role does not exist - fail: - msg: > - Run `aws iam create-service-linked-role --aws-service-name=ecs.amazonaws.com ` to create - a linked role for AWS VPC load balancer management - when: not iam_role_result.iam_roles - - - name: create an ECS cluster - ecs_cluster: - name: "{{ ecs_cluster_name }}" - state: present - register: ecs_cluster - - - name: check that ecs_cluster changed - assert: - that: - - ecs_cluster.changed - - - name: create same ECS cluster (should do nothing) - ecs_cluster: - name: "{{ ecs_cluster_name }}" - state: present - register: ecs_cluster_again - - - name: check that ecs_cluster did not change - assert: - that: - - not ecs_cluster_again.changed - - - name: create a VPC to work in - ec2_vpc_net: - cidr_block: 10.0.0.0/16 - state: present - name: '{{ resource_prefix }}_ecs_cluster' - resource_tags: - Name: '{{ resource_prefix }}_ecs_cluster' - register: setup_vpc - - - name: create a key pair to use for creating an ec2 instance - ec2_key: - name: '{{ resource_prefix }}_ecs_cluster' - state: present - when: ec2_keypair is not defined # allow override in cloud-config-aws.ini - register: setup_key 
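For illustration only (this is not part of the patch): a minimal, standalone sketch of the comparison behaviour the ecs_service hunk above implements. The function is reduced to the two checks this patch touches; the sample ARN is the one quoted in the patch comment, and the grace-period numbers are made up.

    # Reduced sketch of the fixed comparison in ecs_service's is_matching_service().
    def is_matching_service(expected, existing):
        # AWS reports the full task definition ARN, while the playbook only
        # supplies "family:revision", so compare against the last ARN segment.
        if expected['task_definition'] != existing['taskDefinition'].split('/')[-1]:
            return False
        # Previously health_check_grace_period_seconds was never compared at all.
        if expected.get('health_check_grace_period_seconds') != existing.get('healthCheckGracePeriodSeconds'):
            return False
        return True

    existing = {   # shaped like a DescribeServices result (values invented)
        'taskDefinition': 'arn:aws:ecs:eu-central-1:123456789:task-definition/ansible-fargate-nginx:3',
        'healthCheckGracePeriodSeconds': 30,
    }
    expected = {   # shaped like the module parameters (values invented)
        'task_definition': 'ansible-fargate-nginx:3',
        'health_check_grace_period_seconds': 30,
    }
    print(is_matching_service(expected, existing))  # True -> no spurious "changed" report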
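Likewise for the ecs_taskdefinition hunk above, a small standalone sketch of why the launch type has to be checked against requiresCompatibilities rather than compatibilities. The field values are invented, and it assumes ECS lists every launch type a definition validates against in compatibilities, while requiresCompatibilities only echoes what was requested at registration:

    # Invented excerpt of an existing task definition as returned by describe_task_definition.
    existing_td = {
        'requiresCompatibilities': ['FARGATE'],   # what was requested when it was registered
        'compatibilities': ['EC2', 'FARGATE'],    # what ECS validated it against
    }
    requested_launch_type = 'EC2'

    # Old check: EC2 appears in compatibilities, so the existing definition "matches"
    # and no new revision is registered - the launch_type change goes undetected.
    print(requested_launch_type in existing_td.get('compatibilities', []))          # True
    # Fixed check: EC2 is not in requiresCompatibilities, so a new revision is created.
    print(requested_launch_type in existing_td.get('requiresCompatibilities', []))  # False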
- - - name: create subnets - ec2_vpc_subnet: - az: '{{ ec2_region }}{{ item.zone }}' - tags: - Name: '{{ resource_prefix }}_ecs_cluster-subnet-{{ item.zone }}' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: "{{ item.cidr }}" - state: present - register: setup_subnet - with_items: - - zone: a - cidr: 10.0.1.0/24 - - zone: b - cidr: 10.0.2.0/24 - - - name: create an internet gateway so that ECS agents can talk to ECS - ec2_vpc_igw: - vpc_id: '{{ setup_vpc.vpc.id }}' - state: present - register: igw - - - name: create a security group to use for creating an ec2 instance - ec2_group: - name: '{{ resource_prefix }}_ecs_cluster-sg' - description: 'created by Ansible integration tests' - state: present - vpc_id: '{{ setup_vpc.vpc.id }}' - rules: # allow all ssh traffic but nothing else - - ports: 22 - cidr: 0.0.0.0/0 - register: setup_sg - - - name: find a suitable AMI - ec2_ami_info: - owner: amazon - filters: - description: "Amazon Linux AMI* ECS *" - register: ec2_ami_info - - - name: set image id fact - set_fact: - ecs_image_id: "{{ (ec2_ami_info.images|first).image_id }}" - - - name: provision ec2 instance to create an image - ec2_instance: - key_name: '{{ ec2_keypair|default(setup_key.key.name) }}' - instance_type: t2.micro - state: present - image_id: '{{ ecs_image_id }}' - wait: yes - user_data: "{{ user_data }}" - instance_role: ecsInstanceRole - tags: - Name: '{{ resource_prefix }}_ecs_agent' - security_group: '{{ setup_sg.group_id }}' - vpc_subnet_id: '{{ setup_subnet.results[0].subnet.id }}' - register: setup_instance - - - name: create target group - elb_target_group: - name: "{{ ecs_target_group_name }}1" - state: present - protocol: HTTP - port: 8080 - modify_targets: no - vpc_id: '{{ setup_vpc.vpc.id }}' - target_type: instance - register: elb_target_group_instance - - - name: create second target group to use ip target_type - elb_target_group: - name: "{{ ecs_target_group_name }}2" - state: present - protocol: HTTP - port: 8080 - modify_targets: no - vpc_id: '{{ setup_vpc.vpc.id }}' - target_type: ip - register: elb_target_group_ip - - - name: create load balancer - elb_application_lb: - name: "{{ ecs_load_balancer_name }}" - state: present - scheme: internal - security_groups: '{{ setup_sg.group_id }}' - subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}" - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ ecs_target_group_name }}1" - - Protocol: HTTP - Port: 81 - DefaultActions: - - Type: forward - TargetGroupName: "{{ ecs_target_group_name }}2" - - - name: create task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - state: present - register: ecs_task_definition - - - name: obtain ECS task definition facts - ecs_taskdefinition_info: - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - - - name: create ECS service definition - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ 
ecs_task_container_port }}" - role: "ecsServiceRole" - register: ecs_service - - - name: check that ECS service creation changed - assert: - that: - - ecs_service.changed - -# TODO - -# - name: scale down ECS service -# ecs_service: -# state: present -# name: "{{ ecs_service_name }}" -# cluster: "{{ ecs_cluster_name }}" -# task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" -# desired_count: 0 -# deployment_configuration: "{{ ecs_service_deployment_configuration }}" -# placement_strategy: "{{ ecs_service_placement_strategy }}" -# load_balancers: -# - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" -# containerName: "{{ ecs_task_name }}" -# containerPort: "{{ ecs_task_container_port }}" -# role: "ecsServiceRole" -# register: ecs_service_scale_down -# -# - name: pause to allow service to scale down -# pause: -# seconds: 60 - - - name: delete ECS service definition even while service is scaled - Should fail - ecs_service: - state: absent - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - ignore_errors: yes - register: delete_ecs_service_fail - - - name: assert that updating ECS load balancer failed with helpful message - assert: - that: - - delete_ecs_service_fail is failed - - - name: delete ECS service definition even while service is scaled - ecs_service: - state: absent - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - force_deletion: yes - register: delete_ecs_service - - - name: assert that deleting ECS service worked - assert: - that: - - delete_ecs_service.changed - - - name: create ECS service definition - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - role: "ecsServiceRole" - register: ecs_service - - - name: remove target groups from load balancer - elb_application_lb: - name: "{{ ecs_load_balancer_name }}" - state: present - scheme: internal - security_groups: '{{ setup_sg.group_id }}' - subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}" - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: fixed-response - FixedResponseConfig: - ContentType: "text/plain" - MessageBody: "This is the page you're looking for" - StatusCode: "200" - - Protocol: HTTP - Port: 81 - DefaultActions: - - Type: fixed-response - FixedResponseConfig: - ContentType: "text/plain" - MessageBody: "This is the page you're looking for" - StatusCode: "200" - - - name: pause to allow target groups to refresh into no-lb state - pause: - seconds: 10 - - - name: delete in-use targetGroup - elb_target_group: - state: absent - name: "{{ ecs_target_group_name }}1" - - - name: delete in-use targetGroup - elb_target_group: - state: absent - name: "{{ ecs_target_group_name }}2" - - - name: delete ECS service definition even while service is scaled - Should fail - ecs_service: - state: absent - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - force_deletion: yes - register: delete_ecs_service_fail - - - name: assert 
that updating ECS load balancer failed with helpful message - assert: - that: - - delete_ecs_service_fail is failed - - - name: delete ECS service definition even while service is scaled - ecs_service: - state: absent - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - force_deletion: yes - register: delete_ecs_service - - - name: obtain facts for all ECS services in the cluster - ecs_service_info: - cluster: "{{ ecs_cluster_name }}" - details: yes - events: no - register: ecs_service_info - - - name: assert that facts are useful - assert: - that: - - "'services' in ecs_service_info" - - ecs_service_info.services | length > 0 - - "'events' not in ecs_service_info.services[0]" - - - name: obtain facts for existing service in the cluster - ecs_service_info: - cluster: "{{ ecs_cluster_name }}" - service: "{{ ecs_service_name }}" - details: yes - events: no - register: ecs_service_info - - - name: assert that existing service is available and running - assert: - that: - - "ecs_service_info.services|length == 1" - - "ecs_service_info.services_not_running|length == 0" - - - name: obtain facts for non-existent service in the cluster - ecs_service_info: - cluster: "{{ ecs_cluster_name }}" - service: madeup - details: yes - events: no - register: ecs_service_info - - - name: assert that non-existent service is missing - assert: - that: - - "ecs_service_info.services_not_running[0].reason == 'MISSING'" - - - name: obtain specific ECS service facts - ecs_service_info: - service: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - details: yes - register: ecs_service_info - -# -# # ============================================================ -# # Begin tests for Fargate -# -# - name: ensure AmazonECSTaskExecutionRolePolicy exists -# iam_role: -# name: ecsTaskExecutionRole -# assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" -# description: "Allows ECS containers to make calls to ECR" -# state: present -# create_instance_profile: no -# managed_policy: -# - AmazonEC2ContainerServiceRole -# register: iam_execution_role -# -# - name: create Fargate VPC-networked task definition with host port set to 8080 and unsupported network mode (expected to fail) -# ecs_taskdefinition: -# containers: "{{ ecs_fargate_task_containers }}" -# family: "{{ ecs_task_name }}-vpc" -# network_mode: bridge -# launch_type: FARGATE -# cpu: 512 -# memory: 1024 -# state: present -# vars: -# ecs_task_host_port: 8080 -# ignore_errors: yes -# register: ecs_fargate_task_definition_bridged_with_host_port -# -# - name: check that fargate task definition with bridged networking fails gracefully -# assert: -# that: -# - ecs_fargate_task_definition_bridged_with_host_port is failed -# - 'ecs_fargate_task_definition_bridged_with_host_port.msg == "To use FARGATE launch type, network_mode must be awsvpc"' -# -# - name: create Fargate VPC-networked task definition without CPU or Memory (expected to Fail) -# ecs_taskdefinition: -# containers: "{{ ecs_fargate_task_containers }}" -# family: "{{ ecs_task_name }}-vpc" -# network_mode: awsvpc -# launch_type: FARGATE -# state: present -# ignore_errors: yes -# register: ecs_fargate_task_definition_vpc_no_mem -# -# - name: check that fargate task definition without memory or cpu fails gracefully -# assert: -# that: -# - ecs_fargate_task_definition_vpc_no_mem is failed -# - 'ecs_fargate_task_definition_vpc_no_mem.msg == "launch_type is FARGATE but all of the following are missing: cpu, memory"' -# -# - name: create Fargate VPC-networked task 
definition with CPU or Memory and execution role -# ecs_taskdefinition: -# containers: "{{ ecs_fargate_task_containers }}" -# family: "{{ ecs_task_name }}-vpc" -# network_mode: awsvpc -# launch_type: FARGATE -# cpu: 512 -# memory: 1024 -# execution_role_arn: "{{ iam_execution_role.arn }}" -# state: present -# vars: -# ecs_task_host_port: 8080 -# register: ecs_fargate_task_definition -# -# - name: obtain ECS task definition facts -# ecs_taskdefinition_info: -# task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" -# -# - name: create fargate ECS service without network config (expected to fail) -# ecs_service: -# state: present -# name: "{{ ecs_service_name }}4" -# cluster: "{{ ecs_cluster_name }}" -# task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" -# desired_count: 1 -# deployment_configuration: "{{ ecs_service_deployment_configuration }}" -# launch_type: FARGATE -# register: ecs_fargate_service_network_without_awsvpc -# ignore_errors: yes -# -# - name: assert that using Fargate ECS service fails -# assert: -# that: -# - ecs_fargate_service_network_without_awsvpc is failed -# -# - name: create fargate ECS service with network config -# ecs_service: -# state: present -# name: "{{ ecs_service_name }}4" -# cluster: "{{ ecs_cluster_name }}" -# task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" -# desired_count: 1 -# deployment_configuration: "{{ ecs_service_deployment_configuration }}" -# launch_type: FARGATE -# network_configuration: -# subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}" -# security_groups: -# - '{{ setup_sg.group_id }}' -# assign_public_ip: true -# register: ecs_fargate_service_network_with_awsvpc -# -# - name: assert that public IP assignment is enabled -# assert: -# that: -# - 'ecs_fargate_service_network_with_awsvpc.service.networkConfiguration.awsvpcConfiguration.assignPublicIp == "ENABLED"' -# -# - name: create fargate ECS task with run task -# ecs_task: -# operation: run -# cluster: "{{ ecs_cluster_name }}" -# task_definition: "{{ ecs_task_name }}-vpc" -# launch_type: FARGATE -# count: 1 -# network_configuration: -# subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}" -# security_groups: -# - '{{ setup_sg.group_id }}' -# assign_public_ip: true -# started_by: ansible_user -# register: fargate_run_task_output -# -# # aws cli not installed in docker container; make sure it's installed. 
-# - name: install awscli -# pip: -# state: present -# name: awscli -# -# - name: disable taskLongArnFormat -# command: aws ecs put-account-setting --name taskLongArnFormat --value disabled -# environment: -# AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" -# AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" -# AWS_SESSION_TOKEN: "{{ security_token | default('') }}" -# AWS_DEFAULT_REGION: "{{ aws_region }}" -# -# - name: create fargate ECS task with run task and tags (LF disabled) (should fail) -# ecs_task: -# operation: run -# cluster: "{{ ecs_cluster_name }}" -# task_definition: "{{ ecs_task_name }}-vpc" -# launch_type: FARGATE -# count: 1 -# tags: -# tag_key: tag_value -# tag_key2: tag_value2 -# network_configuration: -# subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}" -# security_groups: -# - '{{ setup_sg.group_id }}' -# assign_public_ip: true -# started_by: ansible_user -# register: fargate_run_task_output_with_tags_fail -# ignore_errors: yes -# -# - name: enable taskLongArnFormat -# command: aws ecs put-account-setting --name taskLongArnFormat --value enabled -# environment: -# AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" -# AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" -# AWS_SESSION_TOKEN: "{{ security_token | default('') }}" -# AWS_DEFAULT_REGION: "{{ aws_region }}" -# -# - name: create fargate ECS task with run task and tags -# ecs_task: -# operation: run -# cluster: "{{ ecs_cluster_name }}" -# task_definition: "{{ ecs_task_name }}-vpc" -# launch_type: FARGATE -# count: 1 -# tags: -# tag_key: tag_value -# tag_key2: tag_value2 -# network_configuration: -# subnets: "{{ setup_subnet.results | community.general.json_query('[].subnet.id') }}" -# security_groups: -# - '{{ setup_sg.group_id }}' -# assign_public_ip: true -# started_by: ansible_user -# register: fargate_run_task_output_with_tags - - - # ============================================================ - # End tests for Fargate -# -# -# - name: scale down Fargate ECS service -# ecs_service: -# state: present -# name: "{{ ecs_service_name }}4" -# cluster: "{{ ecs_cluster_name }}" -# task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" -# desired_count: 0 -# deployment_configuration: "{{ ecs_service_deployment_configuration }}" -# ignore_errors: yes -# register: ecs_service_scale_down -# -# - name: stop Fargate ECS task -# ecs_task: -# task: "{{ fargate_run_task_output.task[0].taskArn }}" -# task_definition: "{{ ecs_task_name }}-vpc" -# operation: stop -# cluster: "{{ ecs_cluster_name }}" -# ignore_errors: yes -# -# - name: stop Fargate ECS task -# ecs_task: -# task: "{{ fargate_run_task_output_with_tags.task[0].taskArn }}" -# task_definition: "{{ ecs_task_name }}-vpc" -# operation: stop -# cluster: "{{ ecs_cluster_name }}" -# ignore_errors: yes -# - name: pause to allow services to scale down -# pause: -# seconds: 60 -# when: ecs_service_scale_down is not failed - - - name: remove ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}" - ignore_errors: yes - - - name: remove second ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}2" - ignore_errors: yes - - - name: remove mft ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}-mft" - ignore_errors: yes - - - name: remove scheduling_strategy ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ 
ecs_service_name }}-replica" - ignore_errors: yes - -# until ansible supports service registries, the test for it can't run and this -# removal is not needed -# - name: remove service_registries ecs service -# ecs_service: -# state: absent -# cluster: "{{ ecs_cluster_name }}" -# name: "{{ ecs_service_name }}-service-registries" -# ignore_errors: yes - - - name: remove fargate ECS service - ecs_service: - state: absent - name: "{{ ecs_service_name }}4" - cluster: "{{ ecs_cluster_name }}" - ignore_errors: yes - register: ecs_fargate_service_network_with_awsvpc - - - name: remove ecs task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - revision: "{{ ecs_task_definition.taskdefinition.revision }}" - state: absent - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - - - name: remove ecs task definition again - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - revision: "{{ ecs_task_definition_again.taskdefinition.revision }}" - state: absent - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - - - name: remove second ecs task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - revision: "{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" - state: absent - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - - - name: remove fargate ecs task definition - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - revision: "{{ ecs_fargate_task_definition.taskdefinition.revision }}" - state: absent - ignore_errors: yes - - - name: remove ecs task definition for absent with arn - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-absent" - revision: "{{ ecs_task_definition_absent_with_arn.taskdefinition.revision }}" - state: absent - ignore_errors: yes - - - name: remove load balancer - elb_application_lb: - name: "{{ ecs_load_balancer_name }}" - state: absent - wait: yes - ignore_errors: yes - register: elb_application_lb_remove - - - name: pause to allow target group to be disassociated - pause: - seconds: 30 - when: not elb_application_lb_remove is failed - - - name: remove target groups - elb_target_group: - name: "{{ item }}" - state: absent - with_items: - - "{{ ecs_target_group_name }}1" - - "{{ ecs_target_group_name }}2" - ignore_errors: yes - - - name: remove setup ec2 instance - ec2_instance: - instance_ids: '{{ setup_instance.instance_ids }}' - state: absent - wait: yes - ignore_errors: yes - - - name: remove setup keypair - ec2_key: - name: '{{ resource_prefix }}_ecs_cluster' - state: absent - ignore_errors: yes - - - name: remove security groups - ec2_group: - name: '{{ item }}' - description: 'created by Ansible integration tests' - state: absent - vpc_id: '{{ setup_vpc.vpc.id }}' - with_items: - - "{{ resource_prefix }}-ecs-vpc-test-sg" - - '{{ resource_prefix }}_ecs_cluster-sg' - ignore_errors: yes - - - name: remove IGW - ec2_vpc_igw: - state: absent - vpc_id: '{{ setup_vpc.vpc.id }}' - ignore_errors: yes - - - name: remove setup subnet - ec2_vpc_subnet: - az: '{{ aws_region }}{{ item.zone }}' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: "{{ item.cidr}}" - state: absent - with_items: - - zone: a - cidr: 10.0.1.0/24 - - zone: b - cidr: 10.0.2.0/24 - ignore_errors: yes - - - name: remove setup VPC - ec2_vpc_net: - cidr_block: 10.0.0.0/16 - state: absent - name: '{{ resource_prefix }}_ecs_cluster' - 
ignore_errors: yes - - - name: remove ECS cluster - ecs_cluster: - name: "{{ ecs_cluster_name }}" - state: absent - ignore_errors: yes diff --git a/tests/integration/targets/ecs_cluster/tasks/full_test.yml b/tests/integration/targets/ecs_cluster/tasks/full_test.yml deleted file mode 100644 index 88e29cee7f8..00000000000 --- a/tests/integration/targets/ecs_cluster/tasks/full_test.yml +++ /dev/null @@ -1,1139 +0,0 @@ ---- -- block: - - - name: ensure IAM instance role exists - iam_role: - name: ecsInstanceRole - assume_role_policy_document: "{{ lookup('file','ec2-trust-policy.json') }}" - state: present - create_instance_profile: yes - managed_policy: - - AmazonEC2ContainerServiceforEC2Role - - - name: ensure IAM service role exists - iam_role: - name: ecsServiceRole - assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" - state: present - create_instance_profile: no - managed_policy: - - AmazonEC2ContainerServiceRole - - - name: ensure AWSServiceRoleForECS role exists - iam_role_info: - name: AWSServiceRoleForECS - register: iam_role_result - - # FIXME: come up with a way to automate this - - name: fail if AWSServiceRoleForECS role does not exist - fail: - msg: > - Run `aws iam create-service-linked-role --aws-service-name=ecs.amazonaws.com ` to create - a linked role for AWS VPC load balancer management - when: not iam_role_result.iam_roles - - - name: create an ECS cluster - ecs_cluster: - name: "{{ ecs_cluster_name }}" - state: present - register: ecs_cluster - - - name: check that ecs_cluster changed - assert: - that: - - ecs_cluster.changed - - - name: create same ECS cluster (should do nothing) - ecs_cluster: - name: "{{ ecs_cluster_name }}" - state: present - register: ecs_cluster_again - - - name: check that ecs_cluster did not change - assert: - that: - - not ecs_cluster_again.changed - - - name: create a VPC to work in - ec2_vpc_net: - cidr_block: 10.0.0.0/16 - state: present - name: '{{ resource_prefix }}_ecs_cluster' - resource_tags: - Name: '{{ resource_prefix }}_ecs_cluster' - register: setup_vpc - - - name: create a key pair to use for creating an ec2 instance - ec2_key: - name: '{{ resource_prefix }}_ecs_cluster' - state: present - when: ec2_keypair is not defined # allow override in cloud-config-aws.ini - register: setup_key - - - name: create subnets - ec2_vpc_subnet: - az: '{{ aws_region }}{{ item.zone }}' - tags: - Name: '{{ resource_prefix }}_ecs_cluster-subnet-{{ item.zone }}' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: "{{ item.cidr }}" - state: present - register: setup_subnet - with_items: - - zone: a - cidr: 10.0.1.0/24 - - zone: b - cidr: 10.0.2.0/24 - - - name: create an internet gateway so that ECS agents can talk to ECS - ec2_vpc_igw: - vpc_id: '{{ setup_vpc.vpc.id }}' - state: present - register: igw - - - name: create a security group to use for creating an ec2 instance - ec2_group: - name: '{{ resource_prefix }}_ecs_cluster-sg' - description: 'created by Ansible integration tests' - state: present - vpc_id: '{{ setup_vpc.vpc.id }}' - rules: # allow all ssh traffic but nothing else - - ports: 22 - cidr: 0.0.0.0/0 - register: setup_sg - - - name: find a suitable AMI - ec2_ami_info: - owner: amazon - filters: - description: "Amazon Linux AMI* ECS *" - register: ec2_ami_info - - - name: set image id fact - set_fact: - ecs_image_id: "{{ (ec2_ami_info.images|first).image_id }}" - - - name: provision ec2 instance to create an image - ec2_instance: - key_name: '{{ ec2_keypair|default(setup_key.key.name) }}' - instance_type: t3.micro - state: 
present - image_id: '{{ ecs_image_id }}' - wait: yes - user_data: "{{ user_data }}" - instance_role: ecsInstanceRole - tags: - Name: '{{ resource_prefix }}_ecs_agent' - security_group: '{{ setup_sg.group_id }}' - vpc_subnet_id: '{{ setup_subnet.results[0].subnet.id }}' - register: setup_instance - - - name: create target group - elb_target_group: - name: "{{ ecs_target_group_name }}1" - state: present - protocol: HTTP - port: 8080 - modify_targets: no - vpc_id: '{{ setup_vpc.vpc.id }}' - target_type: instance - register: elb_target_group_instance - - - name: create second target group to use ip target_type - elb_target_group: - name: "{{ ecs_target_group_name }}2" - state: present - protocol: HTTP - port: 8080 - modify_targets: no - vpc_id: '{{ setup_vpc.vpc.id }}' - target_type: ip - register: elb_target_group_ip - - - name: create load balancer - elb_application_lb: - name: "{{ ecs_load_balancer_name }}" - state: present - scheme: internal - security_groups: '{{ setup_sg.group_id }}' - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: "{{ ecs_target_group_name }}1" - - Protocol: HTTP - Port: 81 - DefaultActions: - - Type: forward - TargetGroupName: "{{ ecs_target_group_name }}2" - - - name: create task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - state: present - register: ecs_task_definition - - - name: recreate task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - state: present - register: ecs_task_definition_again - - - name: check that task definition does not change - assert: - that: - - not ecs_task_definition_again.changed - - - name: obtain ECS task definition facts - ecs_taskdefinition_info: - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - - - name: create ECS service definition - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - role: "ecsServiceRole" - register: ecs_service - - - name: check that ECS service creation changed - assert: - that: - - ecs_service.changed - - - name: create same ECS service definition (should not change) - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - role: "ecsServiceRole" - register: ecs_service_again - - - name: check that ECS service recreation changed nothing - 
assert: - that: - - not ecs_service_again.changed - # FIXME: service should not change, needs fixing - ignore_errors: yes - - # FIXME: attempt to update service load balancer - - name: update ECS service definition (expected to fail) - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port|int + 1 }}" - role: "ecsServiceRole" - register: update_ecs_service - ignore_errors: yes - - - name: assert that updating ECS load balancer failed with helpful message - assert: - that: - - update_ecs_service is failed - - "'error' not in update_ecs_service" - - "'msg' in update_ecs_service" - - - - name: attempt to use ECS network configuration on task definition without awsvpc network_mode - ecs_service: - state: present - name: "{{ ecs_service_name }}3" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - register: ecs_service_network_without_awsvpc_task - ignore_errors: yes - - - name: assert that using ECS network configuration with non AWSVPC task definition fails - assert: - that: - - ecs_service_network_without_awsvpc_task is failed - - - name: scale down ECS service - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 0 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - role: "ecsServiceRole" - register: ecs_service_scale_down - - - name: pause to allow service to scale down - pause: - seconds: 60 - - - name: delete ECS service definition - ecs_service: - state: absent - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - register: delete_ecs_service - - - name: assert that deleting ECS service worked - assert: - that: - - delete_ecs_service.changed - - - name: assert that deleting ECS service worked - assert: - that: - - delete_ecs_service.changed - - - name: create VPC-networked task definition with host port set to 0 (expected to fail) - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - state: present - network_mode: awsvpc - register: ecs_task_definition_vpc_no_host_port - ignore_errors: yes - - - name: check that awsvpc task definition 
with host port 0 fails gracefully - assert: - that: - - ecs_task_definition_vpc_no_host_port is failed - - "'error' not in ecs_task_definition_vpc_no_host_port" - - - name: create VPC-networked task definition with host port set to 8080 - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - network_mode: awsvpc - state: present - vars: - ecs_task_host_port: 8080 - register: ecs_task_definition_vpc_with_host_port - - - name: obtain ECS task definition facts - ecs_taskdefinition_info: - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" - register: ecs_taskdefinition_info - - - name: assert that network mode is awsvpc - assert: - that: - - "ecs_taskdefinition_info.network_mode == 'awsvpc'" - - - name: pause to allow service to scale down - pause: - seconds: 60 - - - name: delete ECS service definition - ecs_service: - state: absent - name: "{{ ecs_service_name }}4" - cluster: "{{ ecs_cluster_name }}" - register: delete_ecs_service - - - name: create ECS service definition with network configuration - ecs_service: - state: present - name: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - register: create_ecs_service_with_vpc - - - name: assert that network configuration is correct - assert: - that: - - "'networkConfiguration' in create_ecs_service_with_vpc.service" - - "'awsvpcConfiguration' in create_ecs_service_with_vpc.service.networkConfiguration" - - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2" - - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1" - - - name: create dummy group to update ECS service with - ec2_group: - name: "{{ resource_prefix }}-ecs-vpc-test-sg" - description: "Test security group for ECS with VPC" - vpc_id: '{{ setup_vpc.vpc.id }}' - state: present - - - name: update ECS service definition with new network configuration - ecs_service: - state: present - name: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - "{{ resource_prefix }}-ecs-vpc-test-sg" - register: update_ecs_service_with_vpc - - - name: check that ECS service changed - assert: - that: - - update_ecs_service_with_vpc.changed - - "'networkConfiguration' in update_ecs_service_with_vpc.service" - - "'awsvpcConfiguration' in 
update_ecs_service_with_vpc.service.networkConfiguration" - - "update_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2" - - "update_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1" - - - name: create ecs_service using health_check_grace_period_seconds - ecs_service: - name: "{{ ecs_service_name }}-mft" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - scheduling_strategy: "REPLICA" - health_check_grace_period_seconds: 10 - desired_count: 1 - state: present - register: ecs_service_creation_hcgp - - - - name: health_check_grace_period_seconds sets HealthChecGracePeriodSeconds - assert: - that: - - ecs_service_creation_hcgp.changed - - "{{ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds}} == 10" - - - name: update ecs_service using health_check_grace_period_seconds - ecs_service: - name: "{{ ecs_service_name }}-mft" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - health_check_grace_period_seconds: 30 - state: present - register: ecs_service_creation_hcgp2 - ignore_errors: no - - - name: check that module returns success - assert: - that: - - ecs_service_creation_hcgp2.changed - - "{{ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds}} == 30" - -# until ansible supports service registries, this test can't run. -# - name: update ecs_service using service_registries -# ecs_service: -# name: "{{ ecs_service_name }}-service-registries" -# cluster: "{{ ecs_cluster_name }}" -# load_balancers: -# - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" -# containerName: "{{ ecs_task_name }}" -# containerPort: "{{ ecs_task_container_port }}" -# service_registries: -# - containerName: "{{ ecs_task_name }}" -# containerPort: "{{ ecs_task_container_port }}" -# ### TODO: Figure out how to get a service registry ARN without a service registry module. 
-# registryArn: "{{ ecs_task_service_registry_arn }}" -# task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" -# desired_count: 1 -# state: present -# register: ecs_service_creation_sr -# ignore_errors: yes - -# - name: dump sr output -# debug: var=ecs_service_creation_sr - -# - name: check that module returns success -# assert: -# that: -# - ecs_service_creation_sr.changed - - - name: update ecs_service using REPLICA scheduling_strategy - ecs_service: - name: "{{ ecs_service_name }}-replica" - cluster: "{{ ecs_cluster_name }}" - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - scheduling_strategy: "REPLICA" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 1 - state: present - register: ecs_service_creation_replica - - - name: obtain facts for all ECS services in the cluster - ecs_service_info: - cluster: "{{ ecs_cluster_name }}" - details: yes - events: no - register: ecs_service_info - - - name: assert that facts are useful - assert: - that: - - "'services' in ecs_service_info" - - ecs_service_info.services | length > 0 - - "'events' not in ecs_service_info.services[0]" - - - name: obtain facts for existing service in the cluster - ecs_service_info: - cluster: "{{ ecs_cluster_name }}" - service: "{{ ecs_service_name }}" - details: yes - events: no - register: ecs_service_info - - - name: assert that existing service is available and running - assert: - that: - - "ecs_service_info.services|length == 1" - - "ecs_service_info.services_not_running|length == 0" - - - name: obtain facts for non-existent service in the cluster - ecs_service_info: - cluster: "{{ ecs_cluster_name }}" - service: madeup - details: yes - events: no - register: ecs_service_info - - - name: assert that non-existent service is missing - assert: - that: - - "ecs_service_info.services_not_running[0].reason == 'MISSING'" - - - name: obtain specific ECS service facts - ecs_service_info: - service: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - details: yes - register: ecs_service_info - - - name: check that facts contain network configuration - assert: - that: - - "'networkConfiguration' in ecs_service_info.services[0]" - - - name: attempt to get facts from missing task definition - ecs_taskdefinition_info: - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition.taskdefinition.revision + 1}}" - - - name: Create another task definition with placement constraints - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-constraints" - state: present - placement_constraints: "{{ ecs_taskdefinition_placement_constraints }}" - register: ecs_task_definition_constraints - - - name: Check that task definition has been created - assert: - that: - - ecs_task_definition_constraints is changed - - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].type == "{{ ecs_taskdefinition_placement_constraints[0].type }}" - - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].expression == "{{ ecs_taskdefinition_placement_constraints[0].expression }}" - - - name: Remove ecs task definition with placement constraints - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-constraints" - revision: "{{ ecs_task_definition.taskdefinition.revision }}" - state: absent - register: 
ecs_task_definition_constraints_delete - - - name: Check that task definition has been deleted - assert: - that: - - ecs_task_definition_constraints_delete is changed - - - # ============================================================ - # Begin tests for Fargate - - - name: ensure AmazonECSTaskExecutionRolePolicy exists - iam_role: - name: ecsTaskExecutionRole - assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" - description: "Allows ECS containers to make calls to ECR" - state: present - create_instance_profile: no - managed_policy: - - AmazonEC2ContainerServiceRole - register: iam_execution_role - - - name: create Fargate VPC-networked task definition with host port set to 8080 and unsupported network mode (expected to fail) - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - network_mode: bridge - launch_type: FARGATE - cpu: 512 - memory: 1024 - state: present - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - register: ecs_fargate_task_definition_bridged_with_host_port - - - name: check that fargate task definition with bridged networking fails gracefully - assert: - that: - - ecs_fargate_task_definition_bridged_with_host_port is failed - - 'ecs_fargate_task_definition_bridged_with_host_port.msg == "To use FARGATE launch type, network_mode must be awsvpc"' - - - name: create Fargate VPC-networked task definition without CPU or Memory (expected to Fail) - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - network_mode: awsvpc - launch_type: FARGATE - state: present - ignore_errors: yes - register: ecs_fargate_task_definition_vpc_no_mem - - - name: check that fargate task definition without memory or cpu fails gracefully - assert: - that: - - ecs_fargate_task_definition_vpc_no_mem is failed - - 'ecs_fargate_task_definition_vpc_no_mem.msg == "launch_type is FARGATE but all of the following are missing: cpu, memory"' - - - name: create Fargate VPC-networked task definition with CPU or Memory and execution role - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - network_mode: awsvpc - launch_type: FARGATE - cpu: 512 - memory: 1024 - execution_role_arn: "{{ iam_execution_role.arn }}" - state: present - vars: - ecs_task_host_port: 8080 - register: ecs_fargate_task_definition - - - name: obtain ECS task definition facts - ecs_taskdefinition_info: - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" - - - name: create EC2 VPC-networked task definition with CPU or Memory and execution role - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - network_mode: awsvpc - launch_type: EC2 - cpu: 512 - memory: 1024 - execution_role_arn: "{{ iam_execution_role.arn }}" - state: present - vars: - ecs_task_host_port: 8080 - register: ecs_ec2_task_definition - - - name: obtain ECS task definition facts - ecs_taskdefinition_info: - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_ec2_task_definition.taskdefinition.revision }}" - - - name: check that changing task definiton launch type created a new task definition revision - assert: - that: - - ecs_fargate_task_definition.taskdefinition.revision != ecs_ec2_task_definition.taskdefinition.revision - - - name: create fargate ECS service without network config (expected to fail) - ecs_service: - state: present - name: "{{ ecs_service_name }}4" - cluster: 
"{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - launch_type: FARGATE - register: ecs_fargate_service_network_without_awsvpc - ignore_errors: yes - - - name: assert that using Fargate ECS service fails - assert: - that: - - ecs_fargate_service_network_without_awsvpc is failed - - - name: create fargate ECS service with network config - ecs_service: - state: present - name: "{{ ecs_service_name }}4" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" - desired_count: 1 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - launch_type: FARGATE - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: true - register: ecs_fargate_service_network_with_awsvpc - - - name: assert that public IP assignment is enabled - assert: - that: - - 'ecs_fargate_service_network_with_awsvpc.service.networkConfiguration.awsvpcConfiguration.assignPublicIp == "ENABLED"' - - - name: create fargate ECS task with run task - ecs_task: - operation: run - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc" - launch_type: FARGATE - count: 1 - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: true - started_by: ansible_user - register: fargate_run_task_output - - # aws cli not installed in docker container; make sure it's installed. - - name: install awscli - pip: - state: present - name: awscli - - - name: disable taskLongArnFormat - command: aws ecs put-account-setting --name taskLongArnFormat --value disabled - environment: - AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" - AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: "{{ aws_region }}" - - - name: create fargate ECS task with run task and tags (LF disabled) (should fail) - ecs_task: - operation: run - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc" - launch_type: FARGATE - count: 1 - tags: - tag_key: tag_value - tag_key2: tag_value2 - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: true - started_by: ansible_user - register: fargate_run_task_output_with_tags_fail - ignore_errors: yes - - - name: enable taskLongArnFormat - command: aws ecs put-account-setting --name taskLongArnFormat --value enabled - environment: - AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" - AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: "{{ aws_region }}" - - - name: create fargate ECS task with run task and tags - ecs_task: - operation: run - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc" - launch_type: FARGATE - count: 1 - tags: - tag_key: tag_value - tag_key2: tag_value2 - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: true - started_by: ansible_user - register: fargate_run_task_output_with_tags - - - name: create 
fargate ECS task with run task and assign public ip disable - ecs_task: - operation: run - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc" - launch_type: FARGATE - count: 1 - network_configuration: - subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" - security_groups: - - '{{ setup_sg.group_id }}' - assign_public_ip: false - started_by: ansible_user - register: fargate_run_task_output_with_assign_ip - - - # ============================================================ - # End tests for Fargate - - - name: create task definition for absent with arn regression test - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-absent" - state: present - register: ecs_task_definition_absent_with_arn - - - name: absent task definition by arn - ecs_taskdefinition: - arn: "{{ ecs_task_definition_absent_with_arn.taskdefinition.taskDefinitionArn }}" - state: absent - - always: - # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc - - name: Announce teardown start - debug: - msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****" - - - name: obtain ECS service facts - ecs_service_info: - service: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - details: yes - register: ecs_service_info - - - name: scale down ECS service - ecs_service: - state: present - name: "{{ ecs_service_name }}" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_service_info.services[0].taskDefinition }}" - desired_count: 0 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - ignore_errors: yes - register: ecs_service_scale_down - - - name: obtain second ECS service facts - ecs_service_info: - service: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - details: yes - ignore_errors: yes - register: ecs_service_info - - - name: scale down second ECS service - ecs_service: - state: present - name: "{{ ecs_service_name }}2" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_service_info.services[0].taskDefinition }}" - desired_count: 0 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - placement_strategy: "{{ ecs_service_placement_strategy }}" - load_balancers: - - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - ignore_errors: yes - register: ecs_service_scale_down - - - name: scale down multifunction-test service - ecs_service: - name: "{{ ecs_service_name }}-mft" - cluster: "{{ ecs_cluster_name }}" - state: present - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 0 - ignore_errors: yes - register: ecs_service_scale_down - - - name: scale down scheduling_strategy service - ecs_service: - name: "{{ ecs_service_name }}-replica" - cluster: "{{ ecs_cluster_name }}" - state: present - load_balancers: - - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" - containerName: "{{ ecs_task_name }}" - containerPort: "{{ 
ecs_task_container_port }}" - task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" - desired_count: 0 - ignore_errors: yes - register: ecs_service_scale_down - - -# until ansible supports service registries, the test for it can't run and this -# scale down is not needed -# - name: scale down service_registries service -# ecs_service: -# name: "{{ ecs_service_name }}-service-registries" -# cluster: "{{ ecs_cluster_name }}" -# state: present -# load_balancers: -# - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" -# containerName: "{{ ecs_task_name }}" -# containerPort: "{{ ecs_task_container_port }}" -# task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" -# desired_count: 0 -# ignore_errors: yes -# register: ecs_service_scale_down - - - name: scale down Fargate ECS service - ecs_service: - state: present - name: "{{ ecs_service_name }}4" - cluster: "{{ ecs_cluster_name }}" - task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" - desired_count: 0 - deployment_configuration: "{{ ecs_service_deployment_configuration }}" - ignore_errors: yes - register: ecs_service_scale_down - - - name: stop Fargate ECS task - ecs_task: - task: "{{ fargate_run_task_output.task[0].taskArn }}" - task_definition: "{{ ecs_task_name }}-vpc" - operation: stop - cluster: "{{ ecs_cluster_name }}" - ignore_errors: yes - - - name: stop Fargate ECS task - ecs_task: - task: "{{ fargate_run_task_output_with_tags.task[0].taskArn }}" - task_definition: "{{ ecs_task_name }}-vpc" - operation: stop - cluster: "{{ ecs_cluster_name }}" - ignore_errors: yes - - name: pause to allow services to scale down - pause: - seconds: 60 - when: ecs_service_scale_down is not failed - - - name: remove ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}" - ignore_errors: yes - - - name: remove second ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}2" - ignore_errors: yes - - - name: remove mft ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}-mft" - ignore_errors: yes - - - name: remove scheduling_strategy ecs service - ecs_service: - state: absent - cluster: "{{ ecs_cluster_name }}" - name: "{{ ecs_service_name }}-replica" - ignore_errors: yes - -# until ansible supports service registries, the test for it can't run and this -# removal is not needed -# - name: remove service_registries ecs service -# ecs_service: -# state: absent -# cluster: "{{ ecs_cluster_name }}" -# name: "{{ ecs_service_name }}-service-registries" -# ignore_errors: yes - - - name: remove fargate ECS service - ecs_service: - state: absent - name: "{{ ecs_service_name }}4" - cluster: "{{ ecs_cluster_name }}" - ignore_errors: yes - register: ecs_fargate_service_network_with_awsvpc - - - name: remove ecs task definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - revision: "{{ ecs_task_definition.taskdefinition.revision }}" - state: absent - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - - - name: remove ecs task definition again - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}" - revision: "{{ ecs_task_definition_again.taskdefinition.revision }}" - state: absent - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - - - name: remove second ecs task 
definition - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - revision: "{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" - state: absent - vars: - ecs_task_host_port: 8080 - ignore_errors: yes - - - name: remove fargate ecs task definition - ecs_taskdefinition: - containers: "{{ ecs_fargate_task_containers }}" - family: "{{ ecs_task_name }}-vpc" - revision: "{{ ecs_fargate_task_definition.taskdefinition.revision }}" - state: absent - ignore_errors: yes - - - name: remove ecs task definition for absent with arn - ecs_taskdefinition: - containers: "{{ ecs_task_containers }}" - family: "{{ ecs_task_name }}-absent" - revision: "{{ ecs_task_definition_absent_with_arn.taskdefinition.revision }}" - state: absent - ignore_errors: yes - - - name: remove load balancer - elb_application_lb: - name: "{{ ecs_load_balancer_name }}" - state: absent - wait: yes - ignore_errors: yes - register: elb_application_lb_remove - - - name: pause to allow target group to be disassociated - pause: - seconds: 30 - when: not elb_application_lb_remove is failed - - - name: remove target groups - elb_target_group: - name: "{{ item }}" - state: absent - with_items: - - "{{ ecs_target_group_name }}1" - - "{{ ecs_target_group_name }}2" - ignore_errors: yes - - - name: remove setup ec2 instance - ec2_instance: - instance_ids: '{{ setup_instance.instance_ids }}' - state: absent - wait: yes - ignore_errors: yes - - - name: remove setup keypair - ec2_key: - name: '{{ resource_prefix }}_ecs_cluster' - state: absent - ignore_errors: yes - - - name: remove security groups - ec2_group: - name: '{{ item }}' - description: 'created by Ansible integration tests' - state: absent - vpc_id: '{{ setup_vpc.vpc.id }}' - with_items: - - "{{ resource_prefix }}-ecs-vpc-test-sg" - - '{{ resource_prefix }}_ecs_cluster-sg' - ignore_errors: yes - - - name: remove IGW - ec2_vpc_igw: - state: absent - vpc_id: '{{ setup_vpc.vpc.id }}' - ignore_errors: yes - - - name: remove setup subnet - ec2_vpc_subnet: - az: '{{ aws_region }}{{ item.zone }}' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: "{{ item.cidr}}" - state: absent - with_items: - - zone: a - cidr: 10.0.1.0/24 - - zone: b - cidr: 10.0.2.0/24 - ignore_errors: yes - - - name: remove setup VPC - ec2_vpc_net: - cidr_block: 10.0.0.0/16 - state: absent - name: '{{ resource_prefix }}_ecs_cluster' - ignore_errors: yes - - - name: remove ECS cluster - ecs_cluster: - name: "{{ ecs_cluster_name }}" - state: absent - ignore_errors: yes diff --git a/tests/integration/targets/ecs_cluster/tasks/main.yml b/tests/integration/targets/ecs_cluster/tasks/main.yml index 31c0bd3857e..1a84b163265 100644 --- a/tests/integration/targets/ecs_cluster/tasks/main.yml +++ b/tests/integration/targets/ecs_cluster/tasks/main.yml @@ -8,8 +8,1033 @@ aws_secret_key: '{{ aws_secret_key }}' security_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' + block: + - name: ensure IAM instance role exists + iam_role: + name: ecsInstanceRole + assume_role_policy_document: "{{ lookup('file','ec2-trust-policy.json') }}" + state: present + create_instance_profile: yes + managed_policy: + - AmazonEC2ContainerServiceforEC2Role + + - name: ensure IAM service role exists + iam_role: + name: ecsServiceRole + assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" + state: present + create_instance_profile: no + managed_policy: + - AmazonEC2ContainerServiceRole + + - name: ensure AWSServiceRoleForECS role exists + iam_role_info: + name: 
AWSServiceRoleForECS + register: iam_role_result + + # FIXME: come up with a way to automate this + - name: fail if AWSServiceRoleForECS role does not exist + fail: + msg: > + Run `aws iam create-service-linked-role --aws-service-name=ecs.amazonaws.com ` to create + a linked role for AWS VPC load balancer management + when: not iam_role_result.iam_roles + + - name: create an ECS cluster + ecs_cluster: + name: "{{ ecs_cluster_name }}" + state: present + register: ecs_cluster + + - name: check that ecs_cluster changed + assert: + that: + - ecs_cluster.changed + + - name: create same ECS cluster (should do nothing) + ecs_cluster: + name: "{{ ecs_cluster_name }}" + state: present + register: ecs_cluster_again + + - name: check that ecs_cluster did not change + assert: + that: + - not ecs_cluster_again.changed + + - name: create a VPC to work in + ec2_vpc_net: + cidr_block: 10.0.0.0/16 + state: present + name: '{{ resource_prefix }}_ecs_cluster' + resource_tags: + Name: '{{ resource_prefix }}_ecs_cluster' + register: setup_vpc + + - name: create a key pair to use for creating an ec2 instance + ec2_key: + name: '{{ resource_prefix }}_ecs_cluster' + state: present + when: ec2_keypair is not defined # allow override in cloud-config-aws.ini + register: setup_key + + - name: create subnets + ec2_vpc_subnet: + az: '{{ aws_region }}{{ item.zone }}' + tags: + Name: '{{ resource_prefix }}_ecs_cluster-subnet-{{ item.zone }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: "{{ item.cidr }}" + state: present + register: setup_subnet + with_items: + - zone: a + cidr: 10.0.1.0/24 + - zone: b + cidr: 10.0.2.0/24 + + - name: create an internet gateway so that ECS agents can talk to ECS + ec2_vpc_igw: + vpc_id: '{{ setup_vpc.vpc.id }}' + state: present + register: igw + + - name: create a security group to use for creating an ec2 instance + ec2_group: + name: '{{ resource_prefix }}_ecs_cluster-sg' + description: 'created by Ansible integration tests' + state: present + vpc_id: '{{ setup_vpc.vpc.id }}' + rules: # allow all ssh traffic but nothing else + - ports: 22 + cidr: 0.0.0.0/0 + register: setup_sg + + - name: find a suitable AMI + ec2_ami_info: + owner: amazon + filters: + description: "Amazon Linux AMI* ECS *" + register: ec2_ami_info + + - name: set image id fact + set_fact: + ecs_image_id: "{{ (ec2_ami_info.images|last).image_id }}" + + - name: provision ec2 instance to create an image + ec2_instance: + key_name: '{{ ec2_keypair|default(setup_key.key.name) }}' + instance_type: t3.micro + state: present + image_id: '{{ ecs_image_id }}' + wait: yes + user_data: "{{ user_data }}" + instance_role: ecsInstanceRole + tags: + Name: '{{ resource_prefix }}_ecs_agent' + security_group: '{{ setup_sg.group_id }}' + vpc_subnet_id: '{{ setup_subnet.results[0].subnet.id }}' + register: setup_instance + + - name: create target group + elb_target_group: + name: "{{ ecs_target_group_name }}1" + state: present + protocol: HTTP + port: 8080 + modify_targets: no + vpc_id: '{{ setup_vpc.vpc.id }}' + target_type: instance + register: elb_target_group_instance + + - name: create second target group to use ip target_type + elb_target_group: + name: "{{ ecs_target_group_name }}2" + state: present + protocol: HTTP + port: 8080 + modify_targets: no + vpc_id: '{{ setup_vpc.vpc.id }}' + target_type: ip + register: elb_target_group_ip + + - name: create load balancer + elb_application_lb: + name: "{{ ecs_load_balancer_name }}" + state: present + scheme: internal + security_groups: '{{ setup_sg.group_id }}' + subnets: "{{ 
setup_subnet.results | map(attribute='subnet.id') | list }}" + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ ecs_target_group_name }}1" + - Protocol: HTTP + Port: 81 + DefaultActions: + - Type: forward + TargetGroupName: "{{ ecs_target_group_name }}2" + + - name: create task definition + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}" + state: present + register: ecs_task_definition + + - name: check that initial task definition changes + assert: + that: + - ecs_task_definition.changed + + - name: recreate task definition + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}" + state: present + register: ecs_task_definition_again + + - name: check that task definition does not change + assert: + that: + - not ecs_task_definition_again.changed + + - name: obtain ECS task definition facts + ecs_taskdefinition_info: + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + + - name: create ECS service definition + ecs_service: + state: present + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + role: "ecsServiceRole" + register: ecs_service + + - name: check that ECS service creation changed + assert: + that: + - ecs_service.changed + + - name: create same ECS service definition (should not change) + ecs_service: + state: present + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + role: "ecsServiceRole" + register: ecs_service_again + + - name: check that ECS service recreation changed nothing + assert: + that: + - not ecs_service_again.changed + + - name: create same ECS service definition via force_new_deployment + ecs_service: + state: present + force_new_deployment: yes + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 1 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + health_check_grace_period_seconds: "{{ ecs_service_health_check_grace_period }}" + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + role: "ecsServiceRole" + register: ecs_service_again + + - name: check that ECS service recreation changed again due force_new_deployment + assert: + that: + - 
ecs_service_again.changed
+
+    - name: attempt to use ECS network configuration on task definition without awsvpc network_mode (expected to fail)
+      ecs_service:
+        state: present
+        name: "{{ ecs_service_name }}3"
+        cluster: "{{ ecs_cluster_name }}"
+        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+        desired_count: 1
+        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+        placement_strategy: "{{ ecs_service_placement_strategy }}"
+        load_balancers:
+          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+            containerName: "{{ ecs_task_name }}"
+            containerPort: "{{ ecs_task_container_port }}"
+        network_configuration:
+          subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}"
+          security_groups:
+            - '{{ setup_sg.group_id }}'
+      register: ecs_service_network_without_awsvpc_task
+      ignore_errors: yes
+
+    - name: assert that using ECS network configuration with non AWSVPC task definition fails
+      assert:
+        that:
+          - ecs_service_network_without_awsvpc_task is failed
+
+    - name: scale down ECS service
+      ecs_service:
+        state: present
+        name: "{{ ecs_service_name }}"
+        cluster: "{{ ecs_cluster_name }}"
+        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+        desired_count: 0
+        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+        placement_strategy: "{{ ecs_service_placement_strategy }}"
+        load_balancers:
+          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+            containerName: "{{ ecs_task_name }}"
+            containerPort: "{{ ecs_task_container_port }}"
+        role: "ecsServiceRole"
+      register: ecs_service_scale_down
+
+    - name: pause to allow service to scale down
+      pause:
+        seconds: 60
+
+    - name: delete ECS service definition
+      ecs_service:
+        state: absent
+        name: "{{ ecs_service_name }}"
+        cluster: "{{ ecs_cluster_name }}"
+      register: delete_ecs_service
+
+    - name: assert that deleting ECS service worked
+      assert:
+        that:
+          - delete_ecs_service.changed
+
+    - name: create VPC-networked task definition with host port set to 0 (expected to fail)
+      ecs_taskdefinition:
+        containers: "{{ ecs_task_containers }}"
+        family: "{{ ecs_task_name }}-vpc"
+        state: present
+        network_mode: awsvpc
+      register: ecs_task_definition_vpc_no_host_port
+      ignore_errors: yes
+
+    - name: check that awsvpc task definition with host port 0 fails gracefully
+      assert:
+        that:
+          - ecs_task_definition_vpc_no_host_port is failed
+          - "'error' not in ecs_task_definition_vpc_no_host_port"
+
+    - name: create VPC-networked task definition with host port set to 8080
+      ecs_taskdefinition:
+        containers: "{{ ecs_task_containers }}"
+        family: "{{ ecs_task_name }}-vpc"
+        network_mode: awsvpc
+        state: present
+      vars:
+        ecs_task_host_port: 8080
+      register: ecs_task_definition_vpc_with_host_port
+
+    - name: obtain ECS task definition facts
+      ecs_taskdefinition_info:
+        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
+      register: ecs_taskdefinition_info
+
+    - name: assert that network mode is awsvpc
+      assert:
+        that:
+          - "ecs_taskdefinition_info.network_mode == 'awsvpc'"
+
+    - name: pause to allow service to scale down
+      pause:
+        seconds: 60
+
+    - name: create ECS service definition with network configuration
+      ecs_service:
+        state: present
+        name: "{{ ecs_service_name }}2"
+        cluster: "{{ ecs_cluster_name }}"
+        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}"
+        desired_count: 1
+        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+        placement_strategy: "{{ ecs_service_placement_strategy }}"
+        load_balancers:
+          - targetGroupArn: "{{ elb_target_group_ip.target_group_arn }}"
+            containerName: "{{ ecs_task_name }}"
+            containerPort: "{{ ecs_task_container_port }}"
+        network_configuration:
+          subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}"
+          security_groups:
+            - '{{ setup_sg.group_id }}'
+      register: create_ecs_service_with_vpc
+
+    - name: assert that network configuration is correct
+      assert:
+        that:
+          - "'networkConfiguration' in create_ecs_service_with_vpc.service"
+          - "'awsvpcConfiguration' in create_ecs_service_with_vpc.service.networkConfiguration"
+          - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.subnets|length == 2"
+          - "create_ecs_service_with_vpc.service.networkConfiguration.awsvpcConfiguration.securityGroups|length == 1"
+
+    - name: create ecs_service using health_check_grace_period_seconds
+      ecs_service:
+        name: "{{ ecs_service_name }}-mft"
+        cluster: "{{ ecs_cluster_name }}"
+        load_balancers:
+          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+            containerName: "{{ ecs_task_name }}"
+            containerPort: "{{ ecs_task_container_port }}"
+        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+        scheduling_strategy: "REPLICA"
+        health_check_grace_period_seconds: 30
+        desired_count: 1
+        state: present
+      register: ecs_service_creation_hcgp
+
+    - name: health_check_grace_period_seconds sets healthCheckGracePeriodSeconds
+      assert:
+        that:
+          - ecs_service_creation_hcgp.changed
+          - "{{ecs_service_creation_hcgp.service.healthCheckGracePeriodSeconds}} == 30"
+
+    - name: update ecs_service using health_check_grace_period_seconds
+      ecs_service:
+        name: "{{ ecs_service_name }}-mft"
+        cluster: "{{ ecs_cluster_name }}"
+        load_balancers:
+          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+            containerName: "{{ ecs_task_name }}"
+            containerPort: "{{ ecs_task_container_port }}"
+        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+        desired_count: 1
+        health_check_grace_period_seconds: 10
+        state: present
+      register: ecs_service_creation_hcgp2
+
+    - name: check that module returns success
+      assert:
+        that:
+          - ecs_service_creation_hcgp2.changed
+          - "{{ecs_service_creation_hcgp2.service.healthCheckGracePeriodSeconds}} == 10"
+
+    - name: update ecs_service using REPLICA scheduling_strategy
+      ecs_service:
+        name: "{{ ecs_service_name }}-replica"
+        cluster: "{{ ecs_cluster_name }}"
+        load_balancers:
+          - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}"
+            containerName: "{{ ecs_task_name }}"
+            containerPort: "{{ ecs_task_container_port }}"
+        scheduling_strategy: "REPLICA"
+        task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}"
+        desired_count: 1
+        state: present
+      register: ecs_service_creation_replica
+
+    - name: obtain facts for all ECS services in the cluster
+      ecs_service_info:
+        cluster: "{{ ecs_cluster_name }}"
+        details: yes
+        events: no
+      register: ecs_service_info
+
+    - name: assert that facts are useful
+      assert:
+        that:
+          - "'services' in ecs_service_info"
+          - ecs_service_info.services | length > 0
+          - "'events' not in ecs_service_info.services[0]"
+
+    - name: obtain facts for existing service in the cluster
+      ecs_service_info:
+        cluster: "{{
ecs_cluster_name }}" + service: "{{ ecs_service_name }}" + details: yes + events: no + register: ecs_service_info + + - name: assert that existing service is available and running + assert: + that: + - "ecs_service_info.services|length == 1" + - "ecs_service_info.services_not_running|length == 0" + + - name: obtain facts for non-existent service in the cluster + ecs_service_info: + cluster: "{{ ecs_cluster_name }}" + service: madeup + details: yes + events: no + register: ecs_service_info + + - name: assert that non-existent service is missing + assert: + that: + - "ecs_service_info.services_not_running[0].reason == 'MISSING'" + + - name: obtain specific ECS service facts + ecs_service_info: + service: "{{ ecs_service_name }}2" + cluster: "{{ ecs_cluster_name }}" + details: yes + register: ecs_service_info + + - name: check that facts contain network configuration + assert: + that: + - "'networkConfiguration' in ecs_service_info.services[0]" + + - name: attempt to get facts from missing task definition + ecs_taskdefinition_info: + task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_task_definition.taskdefinition.revision + 1}}" + + - name: Create another task definition with placement constraints + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-constraints" + state: present + placement_constraints: "{{ ecs_taskdefinition_placement_constraints }}" + register: ecs_task_definition_constraints + + - name: Check that task definition has been created + assert: + that: + - ecs_task_definition_constraints is changed + - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].type == "{{ ecs_taskdefinition_placement_constraints[0].type }}" + - ecs_task_definition_constraints.taskdefinition.placementConstraints[0].expression == "{{ ecs_taskdefinition_placement_constraints[0].expression }}" + + - name: Remove ecs task definition with placement constraints + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + arn: "{{ ecs_task_definition_constraints.taskdefinition.taskDefinitionArn }}" + state: absent + register: ecs_task_definition_constraints_delete + + - name: Check that task definition has been deleted + assert: + that: + - ecs_task_definition_constraints_delete is changed + + # ============================================================ + # Begin tests for Fargate + + - name: ensure AmazonECSTaskExecutionRolePolicy exists + iam_role: + name: ecsTaskExecutionRole + assume_role_policy_document: "{{ lookup('file','ecs-trust-policy.json') }}" + description: "Allows ECS containers to make calls to ECR" + state: present + create_instance_profile: no + managed_policy: + - AmazonEC2ContainerServiceRole + register: iam_execution_role + + - name: create Fargate VPC-networked task definition with host port set to 8080 and unsupported network mode (expected to fail) + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + network_mode: bridge + launch_type: FARGATE + cpu: 512 + memory: 1024 + state: present + vars: + ecs_task_host_port: 8080 + ignore_errors: yes + register: ecs_fargate_task_definition_bridged_with_host_port + + - name: check that fargate task definition with bridged networking fails gracefully + assert: + that: + - ecs_fargate_task_definition_bridged_with_host_port is failed + - 'ecs_fargate_task_definition_bridged_with_host_port.msg == "To use FARGATE launch type, network_mode must be awsvpc"' + + - name: create Fargate VPC-networked task definition without CPU or 
Memory (expected to fail)
+      ecs_taskdefinition:
+        containers: "{{ ecs_fargate_task_containers }}"
+        family: "{{ ecs_task_name }}-vpc"
+        network_mode: awsvpc
+        launch_type: FARGATE
+        state: present
+      ignore_errors: yes
+      register: ecs_fargate_task_definition_vpc_no_mem
+
+    - name: check that fargate task definition without memory or cpu fails gracefully
+      assert:
+        that:
+          - ecs_fargate_task_definition_vpc_no_mem is failed
+          - 'ecs_fargate_task_definition_vpc_no_mem.msg == "launch_type is FARGATE but all of the following are missing: cpu, memory"'
+
+    - name: create Fargate VPC-networked task definition with CPU or Memory and execution role
+      ecs_taskdefinition:
+        containers: "{{ ecs_fargate_task_containers }}"
+        family: "{{ ecs_task_name }}-vpc"
+        network_mode: awsvpc
+        launch_type: FARGATE
+        cpu: 512
+        memory: 1024
+        execution_role_arn: "{{ iam_execution_role.arn }}"
+        state: present
+      vars:
+        ecs_task_host_port: 8080
+      register: ecs_fargate_task_definition
+
+    - name: create EC2 VPC-networked task definition with CPU or Memory and execution role
+      ecs_taskdefinition:
+        containers: "{{ ecs_fargate_task_containers }}"
+        family: "{{ ecs_task_name }}-vpc"
+        network_mode: awsvpc
+        launch_type: EC2
+        cpu: 512
+        memory: 1024
+        execution_role_arn: "{{ iam_execution_role.arn }}"
+        state: present
+      vars:
+        ecs_task_host_port: 8080
+      register: ecs_ec2_task_definition
+
+    - name: check that changing task definition launch type created a new task definition revision
+      assert:
+        that:
+          - ecs_fargate_task_definition.taskdefinition.revision != ecs_ec2_task_definition.taskdefinition.revision
+
+    - name: create fargate ECS service without network config (expected to fail)
+      ecs_service:
+        state: present
+        name: "{{ ecs_service_name }}4"
+        cluster: "{{ ecs_cluster_name }}"
+        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
+        desired_count: 1
+        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+        launch_type: FARGATE
+      register: ecs_fargate_service_network_without_awsvpc
+      ignore_errors: yes
+
+    - name: assert that creating a Fargate ECS service without network config fails
+      assert:
+        that:
+          - ecs_fargate_service_network_without_awsvpc is failed
+
+    - name: create fargate ECS service with network config
+      ecs_service:
+        state: present
+        name: "{{ ecs_service_name }}4"
+        cluster: "{{ ecs_cluster_name }}"
+        task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}"
+        desired_count: 1
+        deployment_configuration: "{{ ecs_service_deployment_configuration }}"
+        launch_type: FARGATE
+        network_configuration:
+          subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}"
+          security_groups:
+            - '{{ setup_sg.group_id }}'
+          assign_public_ip: true
+      register: ecs_fargate_service_network_with_awsvpc
+
+    - name: assert that public IP assignment is enabled
+      assert:
+        that:
+          - 'ecs_fargate_service_network_with_awsvpc.service.networkConfiguration.awsvpcConfiguration.assignPublicIp == "ENABLED"'
+
+    - name: create fargate ECS task with run task
+      ecs_task:
+        operation: run
+        cluster: "{{ ecs_cluster_name }}"
+        task_definition: "{{ ecs_task_name }}-vpc"
+        launch_type: FARGATE
+        count: 1
+        network_configuration:
+          subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}"
+          security_groups:
+            - '{{ setup_sg.group_id }}'
+          assign_public_ip: true
+        started_by: ansible_user
+      register: fargate_run_task_output
+
+    - name: create fargate ECS task with run task and tags (LF disabled) (should fail)
+      ecs_task:
+
operation: run + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc" + launch_type: FARGATE + count: 1 + tags: + tag_key: tag_value + tag_key2: tag_value2 + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + assign_public_ip: true + started_by: ansible_user + register: fargate_run_task_output_with_tags_fail + ignore_errors: yes + + - name: enable taskLongArnFormat + command: aws ecs put-account-setting --name taskLongArnFormat --value enabled + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + + - name: create fargate ECS task with run task and tags + ecs_task: + operation: run + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc" + launch_type: FARGATE + count: 1 + tags: + tag_key: tag_value + tag_key2: tag_value2 + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + assign_public_ip: true + started_by: ansible_user + register: fargate_run_task_output_with_tags + + - name: create fargate ECS task with run task and assign public ip disable + ecs_task: + operation: run + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc" + launch_type: FARGATE + count: 1 + network_configuration: + subnets: "{{ setup_subnet.results | map(attribute='subnet.id') | list }}" + security_groups: + - '{{ setup_sg.group_id }}' + assign_public_ip: false + started_by: ansible_user + register: fargate_run_task_output_with_assign_ip + + + # ============================================================ + # End tests for Fargate + + - name: create task definition for absent with arn regression test + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-absent" + state: present + register: ecs_task_definition_absent_with_arn + + - name: absent task definition by arn + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + arn: "{{ ecs_task_definition_absent_with_arn.taskdefinition.taskDefinitionArn }}" + state: absent + + always: + # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc + - name: Announce teardown start + debug: + msg: "***** TESTING COMPLETE. 
COMMENCE TEARDOWN *****" + + - name: remove setup ec2 instance + ec2_instance: + instance_ids: '{{ setup_instance.instance_ids }}' + state: absent + wait: yes + ignore_errors: yes + + - name: obtain ECS service facts + ecs_service_info: + service: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + details: yes + register: ecs_service_info + + - name: scale down ECS service + ecs_service: + state: present + name: "{{ ecs_service_name }}" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_service_info.services[0].taskDefinition }}" + desired_count: 0 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + load_balancers: + - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + ignore_errors: yes + register: ecs_service_scale_down + + - name: obtain second ECS service facts + ecs_service_info: + service: "{{ ecs_service_name }}2" + cluster: "{{ ecs_cluster_name }}" + details: yes + ignore_errors: yes + register: ecs_service_info + + - name: scale down second ECS service + ecs_service: + state: present + name: "{{ ecs_service_name }}2" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_service_info.services[0].taskDefinition }}" + desired_count: 0 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + placement_strategy: "{{ ecs_service_placement_strategy }}" + load_balancers: + - targetGroupArn: "{{ ecs_service_info.services[0].loadBalancers[0].targetGroupArn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + ignore_errors: yes + register: ecs_service_scale_down + + - name: scale down multifunction-test service + ecs_service: + name: "{{ ecs_service_name }}-mft" + cluster: "{{ ecs_cluster_name }}" + state: present + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 0 + ignore_errors: yes + register: ecs_service_scale_down + + - name: scale down scheduling_strategy service + ecs_service: + name: "{{ ecs_service_name }}-replica" + cluster: "{{ ecs_cluster_name }}" + state: present + load_balancers: + - targetGroupArn: "{{ elb_target_group_instance.target_group_arn }}" + containerName: "{{ ecs_task_name }}" + containerPort: "{{ ecs_task_container_port }}" + task_definition: "{{ ecs_task_name }}:{{ ecs_task_definition.taskdefinition.revision }}" + desired_count: 0 + ignore_errors: yes + register: ecs_service_scale_down + + - name: scale down Fargate ECS service + ecs_service: + state: present + name: "{{ ecs_service_name }}4" + cluster: "{{ ecs_cluster_name }}" + task_definition: "{{ ecs_task_name }}-vpc:{{ ecs_fargate_task_definition.taskdefinition.revision }}" + desired_count: 0 + deployment_configuration: "{{ ecs_service_deployment_configuration }}" + ignore_errors: yes + register: ecs_service_scale_down + + - name: stop Fargate ECS task + ecs_task: + task: "{{ fargate_run_task_output.task[0].taskArn }}" + task_definition: "{{ ecs_task_name }}-vpc" + operation: stop + cluster: "{{ ecs_cluster_name }}" + ignore_errors: yes + + - name: stop Fargate ECS task + ecs_task: + task: "{{ fargate_run_task_output_with_tags.task[0].taskArn }}" + task_definition: "{{ ecs_task_name 
}}-vpc" + operation: stop + cluster: "{{ ecs_cluster_name }}" + ignore_errors: yes + + - name: pause to allow services to scale down + pause: + seconds: 60 + when: ecs_service_scale_down is not failed + + - name: remove ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}" + force_deletion: yes + ignore_errors: yes + + - name: remove second ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}2" + force_deletion: yes + ignore_errors: yes + + - name: remove mft ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}-mft" + force_deletion: yes + ignore_errors: yes + + - name: remove scheduling_strategy ecs service + ecs_service: + state: absent + cluster: "{{ ecs_cluster_name }}" + name: "{{ ecs_service_name }}-replica" + force_deletion: yes + ignore_errors: yes + + - name: remove fargate ECS service + ecs_service: + state: absent + name: "{{ ecs_service_name }}4" + cluster: "{{ ecs_cluster_name }}" + force_deletion: yes + ignore_errors: yes + register: ecs_fargate_service_network_with_awsvpc + + - name: remove ecs task definition + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}" + revision: "{{ ecs_task_definition.taskdefinition.revision }}" + state: absent + vars: + ecs_task_host_port: 8080 + ignore_errors: yes + + - name: remove ecs task definition again + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}" + revision: "{{ ecs_task_definition_again.taskdefinition.revision }}" + state: absent + vars: + ecs_task_host_port: 8080 + ignore_errors: yes + + - name: remove second ecs task definition + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + revision: "{{ ecs_task_definition_vpc_with_host_port.taskdefinition.revision }}" + state: absent + vars: + ecs_task_host_port: 8080 + ignore_errors: yes + + - name: remove fargate ecs task definition + ecs_taskdefinition: + containers: "{{ ecs_fargate_task_containers }}" + family: "{{ ecs_task_name }}-vpc" + revision: "{{ ecs_fargate_task_definition.taskdefinition.revision }}" + state: absent + ignore_errors: yes + + - name: remove ecs task definition for absent with arn + ecs_taskdefinition: + containers: "{{ ecs_task_containers }}" + family: "{{ ecs_task_name }}-absent" + revision: "{{ ecs_task_definition_absent_with_arn.taskdefinition.revision }}" + state: absent + ignore_errors: yes + + - name: remove load balancer + elb_application_lb: + name: "{{ ecs_load_balancer_name }}" + state: absent + wait: yes + ignore_errors: yes + register: elb_application_lb_remove + + - name: pause to allow target group to be disassociated + pause: + seconds: 30 + when: not elb_application_lb_remove is failed + + - name: remove setup keypair + ec2_key: + name: '{{ resource_prefix }}_ecs_cluster' + state: absent + ignore_errors: yes + + - name: remove ECS cluster + ecs_cluster: + name: "{{ ecs_cluster_name }}" + state: absent + ignore_errors: yes + register: this_deletion + retries: 12 + delay: 10 + until: this_deletion is not failed + + - name: remove security groups + ec2_group: + name: '{{ item }}' + description: 'created by Ansible integration tests' + state: absent + vpc_id: '{{ setup_vpc.vpc.id }}' + with_items: + - '{{ resource_prefix }}_ecs_cluster-sg' + ignore_errors: yes + register: this_deletion + retries: 10 + delay: 10 + until: this_deletion is not 
failed + + - name: remove target groups + elb_target_group: + name: "{{ item }}" + state: absent + with_items: + - "{{ ecs_target_group_name }}1" + - "{{ ecs_target_group_name }}2" + ignore_errors: yes + + - name: remove IGW + ec2_vpc_igw: + state: absent + vpc_id: '{{ setup_vpc.vpc.id }}' + ignore_errors: yes + + - name: remove setup subnet + ec2_vpc_subnet: + az: '{{ aws_region }}{{ item.zone }}' + vpc_id: '{{ setup_vpc.vpc.id }}' + cidr: "{{ item.cidr}}" + state: absent + with_items: + - zone: a + cidr: 10.0.1.0/24 + - zone: b + cidr: 10.0.2.0/24 + ignore_errors: yes - - include_tasks: network_force_new_deployment.yml - - include_tasks: force_service_deletion.yml - - include_tasks: full_test.yml + - name: remove setup VPC + ec2_vpc_net: + cidr_block: 10.0.0.0/16 + state: absent + name: '{{ resource_prefix }}_ecs_cluster' + ignore_errors: yes diff --git a/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml b/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml deleted file mode 100644 index febb4b1062c..00000000000 --- a/tests/integration/targets/ecs_cluster/tasks/network_force_new_deployment.yml +++ /dev/null @@ -1,107 +0,0 @@ ---- -- block: - - name: create ecs cluster - ecs_cluster: - name: "{{ resource_prefix }}" - state: present - - - name: create ecs_taskdefinition - ecs_taskdefinition: - containers: - - name: my_container - image: ubuntu - memory: 128 - family: "{{ resource_prefix }}" - state: present - register: ecs_taskdefinition_creation - - # even after deleting the cluster and recreating with a different name - # the previous service can prevent the current service from starting - # while it's in a draining state. Check the service info and sleep - # if the service does not report as inactive. 
- - - name: check if service is still running from a previous task - ecs_service_info: - service: "{{ resource_prefix }}" - cluster: "{{ resource_prefix }}" - details: yes - register: ecs_service_info_results - - name: delay if the service was not inactive - debug: var=ecs_service_info_results - - - name: delay if the service was not inactive - pause: - seconds: 30 - when: - - ecs_service_info_results.services|length >0 - - ecs_service_info_results.services[0]['status'] != 'INACTIVE' - - - name: create ecs_service - ecs_service: - name: "{{ resource_prefix }}" - cluster: "{{ resource_prefix }}" - task_definition: "{{ resource_prefix }}" - desired_count: 1 - state: present - register: ecs_service_creation - - - name: ecs_service works fine even when older botocore is used - assert: - that: - - ecs_service_creation.changed - - - name: create ecs_service using force_new_deployment - ecs_service: - name: "{{ resource_prefix }}" - cluster: "{{ resource_prefix }}" - task_definition: "{{ resource_prefix }}" - desired_count: 1 - force_new_deployment: true - state: present - register: ecs_service_creation_force_new_deploy - ignore_errors: yes - - - name: check that module returns success - assert: - that: - - ecs_service_creation_force_new_deploy.changed - - always: - - name: scale down ecs service - ecs_service: - name: "{{ resource_prefix }}" - cluster: "{{ resource_prefix }}" - task_definition: "{{ resource_prefix }}" - desired_count: 0 - state: present - ignore_errors: yes - - - name: pause to wait for scale down - pause: - seconds: 30 - - - name: remove ecs service - ecs_service: - name: "{{ resource_prefix }}" - cluster: "{{ resource_prefix }}" - task_definition: "{{ resource_prefix }}" - desired_count: 1 - state: absent - ignore_errors: yes - - - name: remove ecs task definition - ecs_taskdefinition: - containers: - - name: my_container - image: ubuntu - memory: 128 - family: "{{ resource_prefix }}" - revision: "{{ ecs_taskdefinition_creation.taskdefinition.revision }}" - state: absent - ignore_errors: yes - - - name: remove ecs cluster - ecs_cluster: - name: "{{ resource_prefix }}" - state: absent - ignore_errors: yes