diff --git a/.github/workflows/sanity.yml b/.github/workflows/sanity.yml index 01bc4c9c086..c7ebf34c431 100644 --- a/.github/workflows/sanity.yml +++ b/.github/workflows/sanity.yml @@ -60,5 +60,9 @@ jobs: { "ansible-version": "devel", "python-version": "3.8" + }, + { + "ansible-version": "devel", + "python-version": "3.9" } ] diff --git a/.github/workflows/units.yml b/.github/workflows/units.yml index b55028a08df..adfa2dddd1f 100644 --- a/.github/workflows/units.yml +++ b/.github/workflows/units.yml @@ -59,6 +59,10 @@ jobs: { "ansible-version": "devel", "python-version": "3.8" + }, + { + "ansible-version": "devel", + "python-version": "3.9" } ] collection_pre_install: '' diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a91805f278b..79caa61e435 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,51 @@ amazon.aws Release Notes .. contents:: Topics +v6.1.0 +====== + +Release Summary +--------------- + +This release brings some new features, several bugfixes, and deprecated features are also included. + +Minor Changes +------------- + +- ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464). +- ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464). + +Deprecated Features +------------------- + +- s3_object - support for passing object keys with a leading ``/`` has been deprecated and will be removed in a release after 2025-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1549). + +Bugfixes +-------- + +- autoscaling_group - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). +- autoscaling_group_info - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). +- ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403). +- ec2_metadata_facts - Handle decompression when EC2 instance user-data is gzip compressed. The fetch_url method from ansible.module_utils.urls does not decompress the user-data unless the header explicitly contains ``Content-Encoding: gzip`` (https://github.com/ansible-collections/amazon.aws/pull/1575). +- elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). +- module_utils.acm - fixes list_certificates returning only RSA_2048 certificates (https://github.com/ansible-collections/amazon.aws/issues/1567). +- rds_instance - add support for CACertificateIdentifier to create/update rds instance (https://github.com/ansible-collections/amazon.aws/pull/1459)." + +v6.0.1 +====== + +Release Summary +--------------- + +This is a patch release that includes some bug fixes for the aws_ec2 inventory plugin and the s3_bucket and s3_object modules. + +Bugfixes +-------- + +- aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551). 
+- s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). +- s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548). + v6.0.0 ====== diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 8ef0957eea9..3686ed2a1b6 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -2114,3 +2114,63 @@ releases: name: aws_collection_constants namespace: null release_date: '2023-05-09' + 6.0.1: + changes: + bugfixes: + - aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided + and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551). + - s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). + - s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548). + release_summary: This is a patch release that includes some bug fixes for the + aws_ec2 inventory plugin and the s3_bucket and s3_object modules. + fragments: + - 1538-s3-null.yml + - 1548-s3_object-leading-slash.yml + - 1551-ec2_inventory-no-region.yml + - 1560-revert_1546.yml + - release_summary.yml + release_date: '2023-05-19' + 6.1.0: + changes: + bugfixes: + - autoscaling_group - fix ValidationError when describing an autoscaling group + that has more than 20 target groups attached to it by breaking the request + into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). + - autoscaling_group_info - fix ValidationError when describing an autoscaling + group that has more than 20 target groups attached to it by breaking the request + into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). + - ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403). + - 'ec2_metadata_facts - Handle decompression when EC2 instance user-data is + gzip compressed. The fetch_url method from ansible.module_utils.urls does + not decompress the user-data unless the header explicitly contains ``Content-Encoding: + gzip`` (https://github.com/ansible-collections/amazon.aws/pull/1575).' + - elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` + was including ALB-specific attributes when updating an existing ALB but not + when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). + - module_utils.acm - fixes list_certificates returning only RSA_2048 certificates + (https://github.com/ansible-collections/amazon.aws/issues/1567). + - rds_instance - add support for CACertificateIdentifier to create/update rds + instance (https://github.com/ansible-collections/amazon.aws/pull/1459)." + deprecated_features: + - s3_object - support for passing object keys with a leading ``/`` has been + deprecated and will be removed in a release after 2025-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1549). + minor_changes: + - ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464). + - ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464). + release_summary: This release brings some new features, several bugfixes, and + deprecated features are also included. 
+ fragments: + - 1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml + - 1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml + - 1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml + - 1548-s3_object-leading-slash-deprecate.yml + - 1567-list-certificate-all-key-types.yml + - 1578-rds-instance-docs.yml + - 1593-autoscaling_group_info-20-target-groups-per-call.yml + - 20230526-ec2_mertadata_facts-handle_compressed_user_data.yml + - 20230531-aws_ec2-use_ssm_inventory_documentation.yml + - ec2-inventory-hostnames-doc.yml + - ec2_instance-eni-attach-idempotency.yml + - release_summary.yml + - test-reqs.yml + release_date: '2023-06-07' diff --git a/changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml b/changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml deleted file mode 100644 index 349a148abe3..00000000000 --- a/changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: -- rds_instance - add support for CACertificateIdentifier to create/update rds instance (https://github.com/ansible-collections/amazon.aws/pull/1459)." diff --git a/changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml b/changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml deleted file mode 100644 index c3fe986018b..00000000000 --- a/changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464). -- ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464). diff --git a/changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml b/changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml deleted file mode 100644 index 763a71e2306..00000000000 --- a/changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). diff --git a/changelogs/fragments/1548-s3_object-leading-slash-deprecate.yml b/changelogs/fragments/1548-s3_object-leading-slash-deprecate.yml deleted file mode 100644 index 8260c7af71a..00000000000 --- a/changelogs/fragments/1548-s3_object-leading-slash-deprecate.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: -- s3_object - support for passing object keys with a leading ``/`` has been deprecated and will be removed in a release after 2025-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1549). 
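To illustrate the s3_object leading-slash deprecation referenced in the fragments above, a minimal task sketch (bucket name, object key and local path are hypothetical) showing the now-preferred key form without the leading ``/``:

- name: Upload an object using a key without a leading slash
  amazon.aws.s3_object:
    bucket: my-example-bucket          # hypothetical bucket
    object: reports/2023/summary.txt   # previously often written as /reports/2023/summary.txt; the leading "/" is deprecated
    src: /tmp/summary.txt              # hypothetical local file to upload
    mode: put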
diff --git a/changelogs/fragments/1589-return_false_when_no_change..yml b/changelogs/fragments/1589-return_false_when_no_change..yml new file mode 100644 index 00000000000..5b5367c5f63 --- /dev/null +++ b/changelogs/fragments/1589-return_false_when_no_change..yml @@ -0,0 +1,3 @@ +--- +bugfixes: +- Fixes the changed status to report False when no change has been made. The module had incorrectly always reported a change (https://github.com/ansible-collections/amazon.aws/pull/1589). \ No newline at end of file diff --git a/changelogs/fragments/1604-c2_vpc_nat_gateway-fails-silently.yml b/changelogs/fragments/1604-c2_vpc_nat_gateway-fails-silently.yml new file mode 100644 index 00000000000..e72d9944607 --- /dev/null +++ b/changelogs/fragments/1604-c2_vpc_nat_gateway-fails-silently.yml @@ -0,0 +1,3 @@ +--- +bugfixes: +- ec2_vpc_nat_gateway - add a boolean parameter ``default_create`` that lets users choose whether the module should display an error message or create a NAT gateway when the specified EIP address is not found. The module had incorrectly failed silently if the EIP didn't exist (https://github.com/ansible-collections/amazon.aws/issues/1295). \ No newline at end of file diff --git a/changelogs/fragments/1615-no_formatted_with_extra_space.yml b/changelogs/fragments/1615-no_formatted_with_extra_space.yml new file mode 100644 index 00000000000..693362b6950 --- /dev/null +++ b/changelogs/fragments/1615-no_formatted_with_extra_space.yml @@ -0,0 +1,3 @@ +--- +bugfixes: +- Fixes the stack trace output so that it no longer contains an extra space between each character. The module had incorrectly always output extra spaces between each character (https://github.com/ansible-collections/amazon.aws/pull/1615). \ No newline at end of file diff --git a/changelogs/fragments/1632-changes-to-no-allocate-eip-when-connectivity_type=private.yml b/changelogs/fragments/1632-changes-to-no-allocate-eip-when-connectivity_type=private.yml new file mode 100644 index 00000000000..4b820a3498f --- /dev/null +++ b/changelogs/fragments/1632-changes-to-no-allocate-eip-when-connectivity_type=private.yml @@ -0,0 +1,2 @@ +bugfixes: +- ec2_vpc_nat_gateway - fix so that an Elastic IP address is not allocated when the user creates a private NAT gateway. The module had incorrectly always allocated an Elastic IP address when creating a private NAT gateway (https://github.com/ansible-collections/amazon.aws/pull/1632). \ No newline at end of file diff --git a/changelogs/fragments/1633-backup-selection-conditions.yml b/changelogs/fragments/1633-backup-selection-conditions.yml new file mode 100644 index 00000000000..8fac9ef4ce3 --- /dev/null +++ b/changelogs/fragments/1633-backup-selection-conditions.yml @@ -0,0 +1,5 @@ +minor_changes: +- backup_selection - add validation and documentation for all conditions suboptions (https://github.com/ansible-collections/amazon.aws/pull/1633). + +bugfixes: +- module_utils.backup - get_selection_details fix empty list returned when multiple backup selections exist (https://github.com/ansible-collections/amazon.aws/pull/1633).
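A minimal usage sketch of the new ``default_create`` behaviour described in the fragment above (the subnet ID and EIP address are hypothetical); with ``default_create: true`` a new EIP is allocated when the given address cannot be found, instead of the task failing:

- name: Create NAT gateway, allocating a new EIP if the requested one is missing
  amazon.aws.ec2_vpc_nat_gateway:
    state: present
    subnet_id: subnet-0123456789abcdef0   # hypothetical subnet
    eip_address: 198.51.100.10            # hypothetical address that may not be allocated yet
    default_create: true                  # fall back to allocating a new EIP instead of failing
    wait: true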
diff --git a/changelogs/fragments/1843-iam_instance_profile.yml b/changelogs/fragments/1843-iam_instance_profile.yml new file mode 100644 index 00000000000..3cfb18270d4 --- /dev/null +++ b/changelogs/fragments/1843-iam_instance_profile.yml @@ -0,0 +1,2 @@ +trivial: +- new modules - iam_instance_profile, iam_instance_profile_info diff --git a/changelogs/fragments/1846-arn-validation.yml b/changelogs/fragments/1846-arn-validation.yml new file mode 100644 index 00000000000..56be4417e26 --- /dev/null +++ b/changelogs/fragments/1846-arn-validation.yml @@ -0,0 +1,5 @@ +minor_changes: +- ec2_instance - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619). +- iam_user - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619). +- module_utils.arn - added ``validate_aws_arn`` function to handle common pattern matching for ARNs (https://github.com/ansible-collections/amazon.aws/pull/1619). +- module_utils.arn - add ``resource_id`` and ``resource_type`` to ``parse_aws_arn`` return values (https://github.com/ansible-collections/amazon.aws/pull/1619). diff --git a/changelogs/fragments/20230506-autoscaling_group-fix_sanity.yml b/changelogs/fragments/20230506-autoscaling_group-fix_sanity.yml new file mode 100644 index 00000000000..4a51fd47532 --- /dev/null +++ b/changelogs/fragments/20230506-autoscaling_group-fix_sanity.yml @@ -0,0 +1,2 @@ +trivial: + - "Fix sanity errors autoscaling_group and autoscaling_group_info." diff --git a/changelogs/fragments/202306012-backup_plan-remove-none-from-nested-params.yml b/changelogs/fragments/202306012-backup_plan-remove-none-from-nested-params.yml new file mode 100644 index 00000000000..d244b1ee3de --- /dev/null +++ b/changelogs/fragments/202306012-backup_plan-remove-none-from-nested-params.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - backup_plan - Use existing `scrub_none_values` function from module_utils to remove None values from nested dicts in supplied params. Nested None values were being retained and causing an error when sent through to the boto3 client operation (https://github.com/ansible-collections/amazon.aws/pull/1611). diff --git a/changelogs/fragments/20230612-backup_vault-fix-tag-update.yml b/changelogs/fragments/20230612-backup_vault-fix-tag-update.yml new file mode 100644 index 00000000000..7ed9c45ca0b --- /dev/null +++ b/changelogs/fragments/20230612-backup_vault-fix-tag-update.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - backup_vault - fix error when updating tags on a backup vault by using the correct boto3 client methods for tagging and untagging backup resources (https://github.com/ansible-collections/amazon.aws/pull/1610). diff --git a/changelogs/fragments/20230612-s3_object-support-copy-objects-recursively.yml b/changelogs/fragments/20230612-s3_object-support-copy-objects-recursively.yml new file mode 100644 index 00000000000..9157ec0d0b0 --- /dev/null +++ b/changelogs/fragments/20230612-s3_object-support-copy-objects-recursively.yml @@ -0,0 +1,2 @@ +minor_changes: +- s3_object - Allow recursive copy of objects in S3 bucket (https://github.com/ansible-collections/amazon.aws/issues/1379). diff --git a/changelogs/fragments/20230627-ci-fixup.yml b/changelogs/fragments/20230627-ci-fixup.yml new file mode 100644 index 00000000000..160eb26b459 --- /dev/null +++ b/changelogs/fragments/20230627-ci-fixup.yml @@ -0,0 +1,3 @@ +trivial: +- CI fixup - ``include:`` should be ``include_tasks:``. +- CI fixup - YAML ``Null`` no longer matches any explicit parameter type requirements. 
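As a usage sketch for the new iam_instance_profile module announced in the fragment above (profile and role names are hypothetical; the role is assumed to already exist), based on the options added later in this diff:

- name: Create an instance profile and attach an existing role
  amazon.aws.iam_instance_profile:
    name: AnsibleTestProfile   # hypothetical profile name
    role: AnsibleTestRole      # hypothetical existing IAM role
    tags:
      Env: Prod
    state: present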
diff --git a/changelogs/fragments/20230702-isort.yml b/changelogs/fragments/20230702-isort.yml new file mode 100644 index 00000000000..5ceaa201c0e --- /dev/null +++ b/changelogs/fragments/20230702-isort.yml @@ -0,0 +1,2 @@ +trivial: +- added isort configs to pyproject.toml diff --git a/changelogs/fragments/ec2_instance_info-support-new-attribute.yml b/changelogs/fragments/ec2_instance_info-support-new-attribute.yml new file mode 100644 index 00000000000..5025aed21e7 --- /dev/null +++ b/changelogs/fragments/ec2_instance_info-support-new-attribute.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - ec2_instance_info - add new parameter `include_attributes` to describe instance attributes (https://github.com/ansible-collections/amazon.aws/pull/1577). diff --git a/changelogs/fragments/test-reqs.yml b/changelogs/fragments/test-reqs.yml deleted file mode 100644 index 3ae8d00f4de..00000000000 --- a/changelogs/fragments/test-reqs.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: - - "Move Galaxy test requirements from old transitional format in tests/requirements.yml to a standard Ansible Galaxy requirements file in tests/integration/requirements.yml." diff --git a/docs/docsite/rst/aws_ec2_guide.rst b/docs/docsite/rst/aws_ec2_guide.rst index 6d8b9f2123d..514d8ada439 100644 --- a/docs/docsite/rst/aws_ec2_guide.rst +++ b/docs/docsite/rst/aws_ec2_guide.rst @@ -484,7 +484,9 @@ Now the output of ``ansible-inventory -i demo.aws_ec2.yml --list``: ``use_ssm_inventory`` --------------------- -``use_ssm_inventory: True`` will include SSM inventory variables into hostvars for ssm-configured instances. +``use_ssm_inventory: True`` enables fetching additional EC2 instance information from the AWS Systems Manager (SSM) inventory service into hostvars. By leveraging the SSM inventory data, the ``use_ssm_inventory`` option provides additional details and attributes about the EC2 instances in your inventory. +These details can include operating system information, installed software, network configurations, and custom inventory attributes defined in SSM. + ``cache`` --------- diff --git a/meta/runtime.yml b/meta/runtime.yml index 0242673f7ec..b4e6d17cd3e 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -67,6 +67,8 @@ action_groups: - elb_application_lb_info - elb_classic_lb - execute_lambda + - iam_instance_profile + - iam_instance_profile_info - iam_policy - iam_policy_info - iam_user diff --git a/plugins/inventory/aws_ec2.py b/plugins/inventory/aws_ec2.py index 260ad03d51f..6de182bd59e 100644 --- a/plugins/inventory/aws_ec2.py +++ b/plugins/inventory/aws_ec2.py @@ -32,16 +32,17 @@ hostnames: description: - A list in order of precedence for hostname variables. + - The elements of the list can be a dict with the keys mentioned below or a string. + - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options). + - If value provided does not exist in the above options, it will be used as a literal string. + - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag. type: list - elements: dict + elements: raw default: [] suboptions: name: description: - Name of the host. - - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options). - - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag. 
- - If value provided does not exist in the above options, it will be used as a literal string. type: str required: True prefix: @@ -137,7 +138,10 @@ version_added: 3.1.0 use_ssm_inventory: description: - - Add SSM inventory information into hostvars. + - Enables fetching additional EC2 instance information from the AWS Systems Manager (SSM) inventory service into hostvars. + - By leveraging the SSM inventory data, the I(use_ssm_inventory) option provides additional details and attributes + about the EC2 instances in your inventory. These details can include operating system information, installed software, + network configurations, and custom inventory attributes defined in SSM. type: bool default: False version_added: 6.0.0 @@ -773,6 +777,9 @@ def parse(self, inventory, loader, path, cache=True): use_contrib_script_compatible_ec2_tag_keys = self.get_option("use_contrib_script_compatible_ec2_tag_keys") use_ssm_inventory = self.get_option("use_ssm_inventory") + if not all(isinstance(element, (dict, str)) for element in hostnames): + self.fail_aws("Hostnames should be a list of dict and str.") + if self.get_option("include_extra_api_calls"): self.display.deprecate( "The include_extra_api_calls option has been deprecated and will be removed in release 6.0.0.", diff --git a/plugins/module_utils/acm.py b/plugins/module_utils/acm.py index 114232d2f80..e7fee816f9f 100644 --- a/plugins/module_utils/acm.py +++ b/plugins/module_utils/acm.py @@ -63,7 +63,20 @@ def delete_certificate_with_backoff(self, arn): @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"]) def list_certificates_with_backoff(self, statuses=None): paginator = self.client.get_paginator("list_certificates") - kwargs = dict() + # `list_certificates` requires explicit key type filter, or it returns only RSA_2048 certificates + kwargs = { + "Includes": { + "keyTypes": [ + "RSA_1024", + "RSA_2048", + "RSA_3072", + "RSA_4096", + "EC_prime256v1", + "EC_secp384r1", + "EC_secp521r1", + ], + }, + } if statuses: kwargs["CertificateStatuses"] = statuses return paginator.paginate(**kwargs).build_full_result()["CertificateSummaryList"] diff --git a/plugins/module_utils/arn.py b/plugins/module_utils/arn.py index 22d91b3f364..d62b4c4d800 100644 --- a/plugins/module_utils/arn.py +++ b/plugins/module_utils/arn.py @@ -6,14 +6,46 @@ import re +def validate_aws_arn( + arn, partition=None, service=None, region=None, account_id=None, resource=None, resource_type=None, resource_id=None +): + details = parse_aws_arn(arn) + + if not details: + return False + + if partition and details.get("partition") != partition: + return False + if service and details.get("service") != service: + return False + if region and details.get("region") != region: + return False + if account_id and details.get("account_id") != account_id: + return False + if resource and details.get("resource") != resource: + return False + if resource_type and details.get("resource_type") != resource_type: + return False + if resource_id and details.get("resource_id") != resource_id: + return False + + return True + + def parse_aws_arn(arn): """ + Based on https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html + The following are the general formats for ARNs. arn:partition:service:region:account-id:resource-id arn:partition:service:region:account-id:resource-type/resource-id arn:partition:service:region:account-id:resource-type:resource-id The specific formats depend on the resource. 
The ARNs for some resources omit the Region, the account ID, or both the Region and the account ID. + + Note: resource_type handling is very naive, for complex cases it may be necessary to use + "resource" directly instead of resource_type, this will include the resource type and full ID, + including all paths. """ m = re.search(r"arn:(aws(-([a-z\-]+))?):([\w-]+):([a-z0-9\-]*):(\d*|aws|aws-managed):(.*)", arn) if m is None: @@ -25,6 +57,12 @@ def parse_aws_arn(arn): result.update(dict(account_id=m.group(6))) result.update(dict(resource=m.group(7))) + m2 = re.search(r"^(.*?)[:/](.+)$", m.group(7)) + if m2 is None: + result.update(dict(resource_type=None, resource_id=m.group(7))) + else: + result.update(dict(resource_type=m2.group(1), resource_id=m2.group(2))) + return result diff --git a/plugins/module_utils/backup.py b/plugins/module_utils/backup.py index b456ab970cc..a9e3e6ed01b 100644 --- a/plugins/module_utils/backup.py +++ b/plugins/module_utils/backup.py @@ -144,7 +144,7 @@ def get_selection_details(module, client, plan_name: str, selection_name: Union[ if selection["SelectionName"] == selection_name: selection_id = selection["SelectionId"] result.append(_get_backup_selection(client, module, plan_id, selection_id)) - break + break else: for selection in selection_list: selection_id = selection["SelectionId"] diff --git a/plugins/module_utils/iam.py b/plugins/module_utils/iam.py index 3b08f1dac4e..cde0aff2b95 100644 --- a/plugins/module_utils/iam.py +++ b/plugins/module_utils/iam.py @@ -3,16 +3,73 @@ # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from copy import deepcopy + try: import botocore except ImportError: - pass + pass # Modules are responsible for handling this. 
from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from .retries import AWSRetry -from .botocore import is_boto3_error_code from .arn import parse_aws_arn +from .botocore import is_boto3_error_code +from .exceptions import AnsibleAWSError +from .retries import AWSRetry +from .tagging import ansible_dict_to_boto3_tag_list +from .tagging import boto3_tag_list_to_ansible_dict + + +class AnsibleIAMError(AnsibleAWSError): + pass + + +@AWSRetry.jittered_backoff() +def _tag_iam_instance_profile(client, **kwargs): + client.tag_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _untag_iam_instance_profile(client, **kwargs): + client.untag_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _get_iam_instance_profiles(client, **kwargs): + return client.get_instance_profile(**kwargs)["InstanceProfile"] + + +@AWSRetry.jittered_backoff() +def _list_iam_instance_profiles(client, **kwargs): + paginator = client.get_paginator("list_instance_profiles") + return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"] + + +@AWSRetry.jittered_backoff() +def _list_iam_instance_profiles_for_role(client, **kwargs): + paginator = client.get_paginator("list_instance_profiles_for_role") + return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"] + + +@AWSRetry.jittered_backoff() +def _create_instance_profile(client, **kwargs): + return client.create_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _delete_instance_profile(client, **kwargs): + client.delete_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _add_role_to_instance_profile(client, **kwargs): + client.add_role_to_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _remove_role_from_instance_profile(client, **kwargs): + client.remove_role_from_instance_profile(**kwargs) def get_aws_account_id(module): @@ -76,3 +133,150 @@ def get_aws_account_info(module): ) return (to_native(account_id), to_native(partition)) + + +def create_iam_instance_profile(client, name, path, tags): + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + try: + result = _create_instance_profile(client, InstanceProfileName=name, Path=path, Tags=boto3_tags) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to create instance profile", exception=e) + return result["InstanceProfile"] + + +def delete_iam_instance_profile(client, name): + try: + _delete_instance_profile(client, InstanceProfileName=name) + except is_boto3_error_code("NoSuchEntity"): + # Deletion already happened. 
+ return False + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to delete instance profile", exception=e) + return True + + +def add_role_to_iam_instance_profile(client, profile_name, role_name): + try: + _add_role_to_instance_profile(client, InstanceProfileName=profile_name, RoleName=role_name) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError( + message="Unable to add role to instance profile", + exception=e, + profile_name=profile_name, + role_name=role_name, + ) + return True + + +def remove_role_from_iam_instance_profile(client, profile_name, role_name): + try: + _remove_role_from_instance_profile(client, InstanceProfileName=profile_name, RoleName=role_name) + except is_boto3_error_code("NoSuchEntity"): + # Deletion already happened. + return False + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError( + message="Unable to remove role from instance profile", + exception=e, + profile_name=profile_name, + role_name=role_name, + ) + return True + + +def list_iam_instance_profiles(client, name=None, prefix=None, role=None): + """ + Returns a list of IAM instance profiles in boto3 format. + Profiles need to be converted to Ansible format using normalize_iam_instance_profile before being displayed. + + See also: normalize_iam_instance_profile + """ + try: + if role: + return _list_iam_instance_profiles_for_role(client, RoleName=role) + if name: + # Unlike the others this returns a single result, make this a list with 1 element. + return [_get_iam_instance_profiles(client, InstanceProfileName=name)] + if prefix: + return _list_iam_instance_profiles(client, PathPrefix=prefix) + return _list_iam_instance_profiles(client) + except is_boto3_error_code("NoSuchEntity"): + return [] + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to list instance profiles", exception=e) + + +def normalize_iam_instance_profile(profile): + """ + Converts a boto3 format IAM instance profile into "Ansible" format + """ + + new_profile = camel_dict_to_snake_dict(deepcopy(profile)) + if profile.get("Roles"): + new_profile["roles"] = [normalize_iam_role(role) for role in profile.get("Roles")] + if profile.get("Tags"): + new_profile["tags"] = boto3_tag_list_to_ansible_dict(profile.get("Tags")) + else: + new_profile["tags"] = {} + new_profile["original"] = profile + return new_profile + + +def normalize_iam_role(role): + """ + Converts a boto3 format IAM instance role into "Ansible" format + """ + + new_role = camel_dict_to_snake_dict(deepcopy(role)) + if role.get("InstanceProfiles"): + new_role["instance_profiles"] = [ + normalize_iam_instance_profile(profile) for profile in role.get("InstanceProfiles") + ] + if role.get("AssumeRolePolicyDocument"): + new_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument") + if role.get("Tags"): + new_role["tags"] = boto3_tag_list_to_ansible_dict(role.get("Tags")) + else: + new_role["tags"] = {} + new_role["original"] = role + return new_role + + +def tag_iam_instance_profile(client, name, tags): + if not tags: + return + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + try: + result = _tag_iam_instance_profile(client, 
InstanceProfileName=name, Tags=boto3_tags) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to tag instance profile", exception=e) + + +def untag_iam_instance_profile(client, name, tags): + if not tags: + return + try: + result = _untag_iam_instance_profile(client, InstanceProfileName=name, TagKeys=tags) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to untag instance profile", exception=e) diff --git a/plugins/modules/autoscaling_group.py b/plugins/modules/autoscaling_group.py index 8ac27e2e062..56389400b28 100644 --- a/plugins/modules/autoscaling_group.py +++ b/plugins/modules/autoscaling_group.py @@ -901,12 +901,18 @@ def get_properties(autoscaling_group): if properties["target_group_arns"]: elbv2_connection = module.client("elbv2") tg_paginator = elbv2_connection.get_paginator("describe_target_groups") - tg_result = tg_paginator.paginate(TargetGroupArns=properties["target_group_arns"]).build_full_result() - target_groups = tg_result["TargetGroups"] + # Limit of 20 similar to https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html + tg_chunk_size = 20 + properties["target_group_names"] = [] + tg_chunks = [ + properties["target_group_arns"][i: i + tg_chunk_size] + for i in range(0, len(properties["target_group_arns"]), tg_chunk_size) + ] # fmt: skip + for chunk in tg_chunks: + tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() + properties["target_group_names"].extend([tg["TargetGroupName"] for tg in tg_result["TargetGroups"]]) else: - target_groups = [] - - properties["target_group_names"] = [tg["TargetGroupName"] for tg in target_groups] + properties["target_group_names"] = [] return properties diff --git a/plugins/modules/autoscaling_group_info.py b/plugins/modules/autoscaling_group_info.py index 4e3fe5c5eab..8ec36f1bc85 100644 --- a/plugins/modules/autoscaling_group_info.py +++ b/plugins/modules/autoscaling_group_info.py @@ -414,8 +414,18 @@ def find_asgs(conn, module, name=None, tags=None): if elbv2: try: tg_paginator = elbv2.get_paginator("describe_target_groups") - tg_result = tg_paginator.paginate(TargetGroupArns=asg["target_group_arns"]).build_full_result() - asg["target_group_names"] = [tg["TargetGroupName"] for tg in tg_result["TargetGroups"]] + # Limit of 20 similar to https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html + tg_chunk_size = 20 + asg["target_group_names"] = [] + tg_chunks = [ + asg["target_group_arns"][i: i + tg_chunk_size] + for i in range(0, len(asg["target_group_arns"]), tg_chunk_size) + ] # fmt: skip + for chunk in tg_chunks: + tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() + asg["target_group_names"].extend( + [tg["TargetGroupName"] for tg in tg_result["TargetGroups"]] + ) except is_boto3_error_code("TargetGroupNotFound"): asg["target_group_names"] = [] except ( diff --git a/plugins/modules/backup_plan.py b/plugins/modules/backup_plan.py index 10c4d461cbc..93bb132f2d1 100644 --- a/plugins/modules/backup_plan.py +++ b/plugins/modules/backup_plan.py @@ -257,9 +257,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from 
ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters try: from botocore.exceptions import ClientError, BotoCoreError @@ -541,10 +542,9 @@ def main(): plan_name = module.params["backup_plan_name"] plan = { "backup_plan_name": module.params["backup_plan_name"], - "rules": [{k: v for k, v in rule.items() if v is not None} for rule in module.params["rules"] or []], + "rules": [scrub_none_parameters(rule) for rule in module.params["rules"] or []], "advanced_backup_settings": [ - {k: v for k, v in setting.items() if v is not None} - for setting in module.params["advanced_backup_settings"] or [] + scrub_none_parameters(setting) for setting in module.params["advanced_backup_settings"] or [] ], } tags = module.params["tags"] diff --git a/plugins/modules/backup_selection.py b/plugins/modules/backup_selection.py index e6edc251a31..6dd7454e0ee 100644 --- a/plugins/modules/backup_selection.py +++ b/plugins/modules/backup_selection.py @@ -43,13 +43,14 @@ description: - A list of conditions that you define to assign resources to your backup plans using tags. - Condition operators are case sensitive. + - When you specify more than one condition in I(list_of_tags), you assign all resources that match AT LEAST ONE condition (using OR logic). type: list elements: dict suboptions: condition_type: description: - An operation applied to a key-value pair used to assign resources to your backup plan. - - Condition only supports C(StringEquals). + - Condition only supports C(string_equals). type: str condition_key: description: @@ -69,8 +70,71 @@ conditions: description: - A list of conditions (expressed as a dict) that you define to assign resources to your backup plans using tags. - - I(conditions) supports C(StringEquals), C(StringLike), C(StringNotEquals), and C(StringNotLike). I(list_of_tags) only supports C(StringEquals). + - When you specify more than one condition in I(conditions), you only assign the resources that match ALL conditions (using AND logic). + - I(conditions) supports C(string_equals), C(string_like), C(string_not_equals), and C(string_not_like). I(list_of_tags) only supports C(string_equals). type: dict + suboptions: + string_equals: + description: + - Filters the values of your tagged resources for only those resources that you tagged with the same value. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str + string_like: + description: + - Filters the values of your tagged resources for matching tag values with the use of a wildcard character (*) anywhere in the string. + For example, "prod*" or "*rod*" matches the tag value "production". + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 
'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str + string_not_equals: + description: + - Filters the values of your tagged resources for only those resources that you tagged that do not have the same value. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str + string_not_like: + description: + - Filters the values of your tagged resources for non-matching tag values with the use of a wildcard character (*) anywhere in the string. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str state: description: - Create, delete a backup selection. @@ -220,7 +284,47 @@ def main(): backup_plan_name=dict(type="str", required=True, aliases=["plan_name"]), iam_role_arn=dict(type="str"), resources=dict(type="list", elements="str"), - conditions=dict(type="dict"), + conditions=dict( + type="dict", + options=dict( + string_equals=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + string_like=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + string_not_equals=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + string_not_like=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + ), + ), not_resources=dict(type="list", elements="str"), list_of_tags=dict( type="list", @@ -271,8 +375,7 @@ def main(): if current_selection: results["exists"] = True - update_needed |= check_for_update(current_selection, backup_selection_data, iam_role_arn) - + update_needed = check_for_update(current_selection, backup_selection_data, iam_role_arn) if update_needed: if module.check_mode: results["changed"] = True diff --git a/plugins/modules/backup_vault.py b/plugins/modules/backup_vault.py index 8d7452431ef..5ae309d1059 100644 --- a/plugins/modules/backup_vault.py +++ b/plugins/modules/backup_vault.py @@ -93,7 +93,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict @@ -150,17 +149,14 @@ def tag_vault(module, client, tags, vault_arn, curr_tags=None, purge_tags=True): return True if tags_to_remove: - remove = {k: curr_tags[k] for k in tags_to_remove} - tags_to_remove = ansible_dict_to_boto3_tag_list(remove) try: - client.remove_tags(ResourceId=vault_arn, 
TagsList=tags_to_remove) + client.untag_resource(ResourceArn=vault_arn, TagKeyList=tags_to_remove) except (BotoCoreError, ClientError) as err: module.fail_json_aws(err, msg="Failed to remove tags from the vault") if tags_to_add: - tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_add) try: - client.add_tags(ResourceId=vault_arn, TagsList=tags_to_add) + client.tag_resource(ResourceArn=vault_arn, Tags=tags_to_add) except (BotoCoreError, ClientError) as err: module.fail_json_aws(err, msg="Failed to add tags to Vault") diff --git a/plugins/modules/cloudwatchevent_rule.py b/plugins/modules/cloudwatchevent_rule.py index 828e75533b2..8f6f1fe5734 100644 --- a/plugins/modules/cloudwatchevent_rule.py +++ b/plugins/modules/cloudwatchevent_rule.py @@ -433,6 +433,18 @@ def _rule_matches_aws(self): def _targets_to_put(self): """Returns a list of targets that need to be updated or added remotely""" remote_targets = self.rule.list_targets() + + # keys with none values must be scrubbed off of self.targets + temp = [] + for t in self.targets: + if t["input_transformer"] is not None and t["input_transformer"]["input_template"] is not None: + # The remote_targets contain quotes, so add + # quotes to temp + val = t["input_transformer"]["input_template"] + t["input_transformer"]["input_template"] = '"' + val + '"' + temp.append(scrub_none_parameters(t)) + self.targets = temp + return [t for t in self.targets if t not in remote_targets] def _remote_target_ids_to_remove(self): diff --git a/plugins/modules/ec2_instance.py b/plugins/modules/ec2_instance.py index 9847e36cfc2..6c5a7e9f713 100644 --- a/plugins/modules/ec2_instance.py +++ b/plugins/modules/ec2_instance.py @@ -975,7 +975,7 @@ from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags @@ -1510,16 +1510,19 @@ def change_network_attachments(instance, params): # network.interfaces can create the need to attach new interfaces old_ids = [inty["NetworkInterfaceId"] for inty in instance["NetworkInterfaces"]] to_attach = set(new_ids) - set(old_ids) - for eni_id in to_attach: - try: - client.attach_network_interface( - aws_retry=True, - DeviceIndex=new_ids.index(eni_id), - InstanceId=instance["InstanceId"], - NetworkInterfaceId=eni_id, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Could not attach interface {eni_id} to instance {instance['InstanceId']}") + if not module.check_mode: + for eni_id in to_attach: + try: + client.attach_network_interface( + aws_retry=True, + DeviceIndex=new_ids.index(eni_id), + InstanceId=instance["InstanceId"], + NetworkInterfaceId=eni_id, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws( + e, msg=f"Could not attach interface {eni_id} to instance {instance['InstanceId']}" + ) return bool(len(to_attach)) return False @@ -1788,8 +1791,7 @@ def pretty_instance(i): def determine_iam_role(name_or_arn): - result = parse_aws_arn(name_or_arn) - if result and result["service"] == "iam" and 
result["resource"].startswith("instance-profile/"): + if validate_aws_arn(name_or_arn, service="iam", resource_type="instance-profile"): return name_or_arn iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: diff --git a/plugins/modules/ec2_instance_info.py b/plugins/modules/ec2_instance_info.py index 484f54f24c1..6977edb9700 100644 --- a/plugins/modules/ec2_instance_info.py +++ b/plugins/modules/ec2_instance_info.py @@ -36,6 +36,30 @@ required: false aliases: ['uptime'] type: int + include_attributes: + description: + - Describes the specified attributes of the returned instances. + required: false + type: list + elements: str + choices: + - instanceType + - kernel + - ramdisk + - userData + - disableApiTermination + - instanceInitiatedShutdownBehavior + - rootDeviceName + - blockDeviceMapping + - productCodes + - sourceDestCheck + - groupSet + - ebsOptimized + - sriovNetSupport + - enclaveOptions + - disableApiStop + aliases: ['attributes'] + version_added: 6.3.0 extends_documentation_fragment: - amazon.aws.common.modules @@ -77,6 +101,13 @@ "tag:Name": "RHEL-*" instance-state-name: [ "running"] register: ec2_node_info + +- name: Gather information about a particular instance using ID and include kernel attribute + amazon.aws.ec2_instance_info: + instance_ids: + - i-12345678 + include_attributes: + - kernel """ RETURN = r""" @@ -500,6 +531,20 @@ returned: always type: dict sample: vpc-0011223344 + attributes: + description: The details of the instance attribute specified on input. + returned: when include_attribute is specified + type: dict + sample: + { + 'disable_api_termination': { + 'value': True + }, + 'ebs_optimized': { + 'value': True + } + } + version_added: 6.3.0 """ import datetime @@ -549,6 +594,12 @@ def list_ec2_instances(connection, module): for reservation in reservations["Reservations"]: instances = instances + reservation["Instances"] + # include instances attributes + attributes = module.params.get("include_attributes") + if attributes: + for instance in instances: + instance["attributes"] = describe_instance_attributes(connection, instance["InstanceId"], attributes) + # Turn the boto3 result in to ansible_friendly_snaked_names snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances] @@ -559,11 +610,39 @@ def list_ec2_instances(connection, module): module.exit_json(instances=snaked_instances) +def describe_instance_attributes(connection, instance_id, attributes): + result = {} + for attr in attributes: + response = connection.describe_instance_attribute(Attribute=attr, InstanceId=instance_id) + for key in response: + if key not in ("InstanceId", "ResponseMetadata"): + result[key] = response[key] + return result + + def main(): + instance_attributes = [ + "instanceType", + "kernel", + "ramdisk", + "userData", + "disableApiTermination", + "instanceInitiatedShutdownBehavior", + "rootDeviceName", + "blockDeviceMapping", + "productCodes", + "sourceDestCheck", + "groupSet", + "ebsOptimized", + "sriovNetSupport", + "enclaveOptions", + "disableApiStop", + ] argument_spec = dict( minimum_uptime=dict(required=False, type="int", default=None, aliases=["uptime"]), instance_ids=dict(default=[], type="list", elements="str"), filters=dict(default={}, type="dict"), + include_attributes=dict(type="list", elements="str", aliases=["attributes"], choices=instance_attributes), ) module = AnsibleAWSModule( diff --git a/plugins/modules/ec2_metadata_facts.py b/plugins/modules/ec2_metadata_facts.py index b8288d2d912..9d043bf218d 100644 --- 
a/plugins/modules/ec2_metadata_facts.py +++ b/plugins/modules/ec2_metadata_facts.py @@ -438,6 +438,7 @@ import re import socket import time +import zlib from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text @@ -484,11 +485,42 @@ def __init__( self._token = None self._prefix = "ansible_ec2_%s" + def _decode(self, data): + try: + return data.decode("utf-8") + except UnicodeDecodeError: + # Decoding as UTF-8 failed, return data without raising an error + self.module.warn("Decoding user-data as UTF-8 failed, return data as is ignoring any error") + return data.decode("utf-8", errors="ignore") + + def decode_user_data(self, data): + is_compressed = False + + # Check if data is compressed using zlib header + if data.startswith(b"\x78\x9c") or data.startswith(b"\x1f\x8b"): + is_compressed = True + + if is_compressed: + # Data is compressed, attempt decompression and decode using UTF-8 + try: + decompressed = zlib.decompress(data, zlib.MAX_WBITS | 32) + return self._decode(decompressed) + except zlib.error: + # Unable to decompress, return original data + self.module.warn( + "Unable to decompress user-data using zlib, attempt to decode original user-data as UTF-8" + ) + return self._decode(data) + else: + # Data is not compressed, decode using UTF-8 + return self._decode(data) + def _fetch(self, url): encoded_url = quote(url, safe="%/:=&?~#+!$,;'@()*[]") headers = {} if self._token: headers = {"X-aws-ec2-metadata-token": self._token} + response, info = fetch_url(self.module, encoded_url, headers=headers, force=True) if info.get("status") in (401, 403): @@ -505,6 +537,8 @@ def _fetch(self, url): ) if response and info["status"] < 400: data = response.read() + if "user-data" in encoded_url: + return to_text(self.decode_user_data(data)) else: data = None return to_text(data) diff --git a/plugins/modules/ec2_vpc_nat_gateway.py b/plugins/modules/ec2_vpc_nat_gateway.py index 9c0229906ac..3586a98806e 100644 --- a/plugins/modules/ec2_vpc_nat_gateway.py +++ b/plugins/modules/ec2_vpc_nat_gateway.py @@ -75,6 +75,16 @@ When specifying this option, ensure you specify the eip_address parameter as well otherwise any subsequent runs will fail. type: str + default_create: + description: + - When I(default_create=True) and I(eip_address) has been set, but not yet + allocated, the NAT gateway is created and a new EIP is automatically allocated. + - When I(default_create=False) and I(eip_address) has been set, but not yet + allocated, the module will fail. + - If I(eip_address) has not been set, this parameter has no effect. + default: false + type: bool + version_added: 6.2.0 author: - Allen Sanabria (@linuxdynasty) - Jon Hadfield (@jonhadfield) @@ -660,6 +670,7 @@ def pre_create( wait=False, client_token=None, connectivity_type="public", + default_create=False, ): """Create an Amazon NAT Gateway. Args: @@ -681,6 +692,8 @@ def pre_create( default = False client_token (str): default = None + default_create (bool): create a NAT gateway even if EIP address is not found. 
+ default = False Basic Usage: >>> client = boto3.client('ec2') @@ -736,17 +749,34 @@ def pre_create( msg = f"NAT Gateway {existing_gateways[0]['nat_gateway_id']} already exists in subnet_id {subnet_id}" return changed, msg, results else: - changed, msg, allocation_id = allocate_eip_address(client, module) + if connectivity_type == "public": + changed, msg, allocation_id = allocate_eip_address(client, module) - if not changed: - return changed, msg, dict() + if not changed: + return changed, msg, dict() elif eip_address or allocation_id: if eip_address and not allocation_id: allocation_id, msg = get_eip_allocation_id_by_address(client, module, eip_address) - if not allocation_id: + if not allocation_id and not default_create: changed = False - return changed, msg, dict() + module.fail_json(msg=msg) + elif not allocation_id and default_create: + eip_address = None + return pre_create( + client, + module, + subnet_id, + tags, + purge_tags, + allocation_id, + eip_address, + if_exist_do_not_create, + wait, + client_token, + connectivity_type, + default_create, + ) existing_gateways, allocation_id_exists = gateway_in_subnet_exists(client, module, subnet_id, allocation_id) @@ -869,6 +899,7 @@ def main(): client_token=dict(type="str", no_log=False), tags=dict(required=False, type="dict", aliases=["resource_tags"]), purge_tags=dict(default=True, type="bool"), + default_create=dict(type="bool", default=False), ) module = AnsibleAWSModule( @@ -890,6 +921,7 @@ def main(): if_exist_do_not_create = module.params.get("if_exist_do_not_create") tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") + default_create = module.params.get("default_create") try: client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) @@ -912,6 +944,7 @@ def main(): wait, client_token, connectivity_type, + default_create, ) else: changed, msg, results = remove(client, module, nat_gateway_id, wait, release_eip, connectivity_type) diff --git a/plugins/modules/iam_instance_profile.py b/plugins/modules/iam_instance_profile.py new file mode 100644 index 00000000000..b34b6b1305b --- /dev/null +++ b/plugins/modules/iam_instance_profile.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_instance_profile +version_added: 6.2.0 +short_description: manage IAM instance profiles +description: + - Manage IAM instance profiles. +author: + - Mark Chappell (@tremble) +options: + state: + description: + - Desired state of the instance profile. + type: str + choices: ["absent", "present"] + default: "present" + name: + description: + - Name of an instance profile. + aliases: + - instance_profile_name + type: str + required: True + prefix: + description: + - The path prefix for filtering the results. + aliases: ["path_prefix", "path"] + type: str + default: "/" + role: + description: + - The name of the role to attach to the instance profile. + - To remove all roles from the instance profile set I(role=""). 
+ type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Find all existing IAM instance profiles + amazon.aws.iam_instance_profile_info: + register: result + +- name: Describe a single instance profile + amazon.aws.iam_instance_profile_info: + name: MyIAMProfile + register: result + +- name: Find all IAM instance profiles starting with /some/path/ + amazon.aws.iam_instance_profile_info: + prefile: /some/path/ + register: result +""" + +RETURN = r""" +iam_instance_profile: + description: List of IAM instance profiles + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestProfile + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2023-01-12T11:18:29+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AROA12345EXAMPLE54321 + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile. + returned: always + type: list + sample: [] + tags: + description: Instance profile tags. + type: dict + returned: always + sample: '{"Env": "Prod"}' +""" + +from copy import deepcopy + +from ansible_collections.amazon.aws.plugins.module_utils.iam import add_role_to_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import create_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import delete_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import remove_role_from_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import tag_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import untag_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +def describe_iam_instance_profile(client, name, prefix): + profiles = [] + profiles = list_iam_instance_profiles(client, name=name, prefix=prefix) + + if not profiles: + return None + + return normalize_iam_instance_profile(profiles[0]) + + +def create_instance_profile(client, name, path, tags, check_mode): + if check_mode: + return True, {"instance_profile_name": name, "path": path, "tags": tags or {}, "roles": []} + + profile = create_iam_instance_profile(client, name, path, tags) + return True, normalize_iam_instance_profile(profile) + + +def ensure_tags( + original_profile, + client, + name, + tags, + purge_tags, + check_mode, +): + if tags is None: + return False, original_profile + + original_tags = original_profile.get("tags") or {} + + tags_to_set, 
tag_keys_to_unset = compare_aws_tags(original_tags, tags, purge_tags) + if not tags_to_set and not tag_keys_to_unset: + return False, original_profile + + new_profile = deepcopy(original_profile) + desired_tags = deepcopy(original_tags) + + for key in tag_keys_to_unset: + desired_tags.pop(key, None) + desired_tags.update(tags_to_set) + new_profile["tags"] = desired_tags + + if not check_mode: + untag_iam_instance_profile(client, name, tag_keys_to_unset) + tag_iam_instance_profile(client, name, tags_to_set) + + return True, new_profile + + +def ensure_role( + original_profile, + client, + name, + role, + check_mode, +): + if role is None: + return False, original_profile + + if role == "" and not original_profile.get("roles"): + return False, original_profile + else: + desired_role = [] + + if original_profile.get("roles") and original_profile.get("roles")[0].get("role_name", None) == role: + return False, original_profile + else: + desired_role = [{"role_name": role}] + + new_profile = deepcopy(original_profile) + new_profile["roles"] = desired_role + + if check_mode: + return True, new_profile + + if original_profile.get("roles"): + # We're changing the role, so we always need to remove the existing one first + remove_role_from_iam_instance_profile(client, name, original_profile["roles"][0]["role_name"]) + if role: + add_role_to_iam_instance_profile(client, name, role) + + return True, new_profile + + +def ensure_present( + original_profile, + client, + name, + path, + tags, + purge_tags, + role, + check_mode, +): + changed = False + if not original_profile: + changed, new_profile = create_instance_profile( + client, + name=name, + path=path, + tags=tags, + check_mode=check_mode, + ) + else: + new_profile = deepcopy(original_profile) + + role_changed, new_profile = ensure_role( + new_profile, + client, + name, + role, + check_mode, + ) + + tags_changed, new_profile = ensure_tags( + new_profile, + client, + name, + tags, + purge_tags, + check_mode, + ) + + changed |= role_changed + changed |= tags_changed + + return changed, new_profile + + +def ensure_absent( + original_profile, + client, + name, + prefix, + check_mode, +): + if not original_profile: + return False + + if check_mode: + return True + + roles = original_profile.get("roles") or [] + for role in roles: + remove_role_from_iam_instance_profile(client, name, role.get("role_name")) + + return delete_iam_instance_profile(client, name) + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["instance_profile_name"], required=True), + prefix=dict(aliases=["path_prefix", "path"], default="/"), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(aliases=["resource_tags"], type="dict"), + purge_tags=dict(type="bool", default=True), + role=dict(), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + try: + name = module.params["name"] + prefix = module.params["prefix"] + state = module.params["state"] + + original_profile = describe_iam_instance_profile(client, name, prefix) + + if state == "absent": + changed = ensure_absent( + original_profile, + client, + name, + prefix, + module.check_mode, + ) + final_profile = None + else: + changed, final_profile = ensure_present( + original_profile, + client, + name, + prefix, + module.params["tags"], + module.params["purge_tags"], + module.params["role"], + module.check_mode, + ) + + if not 
module.check_mode: + final_profile = describe_iam_instance_profile(client, name, prefix) + + except AnsibleIAMError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) + + results = { + "changed": changed, + "iam_instance_profile": final_profile, + } + if changed: + results["diff"] = { + "before": original_profile, + "after": final_profile, + } + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/iam_instance_profile_info.py b/plugins/modules/iam_instance_profile_info.py new file mode 100644 index 00000000000..6b7f5ed2468 --- /dev/null +++ b/plugins/modules/iam_instance_profile_info.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_instance_profile_info +version_added: 6.2.0 +short_description: gather information on IAM instance profiles +description: + - Gathers information about IAM instance profiles. +author: + - Mark Chappell (@tremble) +options: + name: + description: + - Name of an instance profile to search for. + - Mutually exclusive with I(prefix). + aliases: + - instance_profile_name + type: str + path_prefix: + description: + - The path prefix for filtering the results. + - Mutually exclusive with I(name). + aliases: ["path", "prefix"] + type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Find all existing IAM instance profiles + amazon.aws.iam_instance_profile_info: + register: result + +- name: Describe a single instance profile + amazon.aws.iam_instance_profile_info: + name: MyIAMProfile + register: result + +- name: Find all IAM instance profiles starting with /some/path/ + amazon.aws.iam_instance_profile_info: + prefile: /some/path/ + register: result +""" + +RETURN = r""" +iam_instance_profiles: + description: List of IAM instance profiles + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestProfile + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2023-01-12T11:18:29+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AROA12345EXAMPLE54321 + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile. 
+ returned: always + type: list + sample: [] +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def describe_iam_instance_profiles(module, client): + name = module.params["name"] + prefix = module.params["path_prefix"] + profiles = [] + try: + profiles = list_iam_instance_profiles(client, name=name, prefix=prefix) + except AnsibleIAMError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) + + return [normalize_iam_instance_profile(p) for p in profiles] + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["instance_profile_name"]), + path_prefix=dict(aliases=["path", "prefix"]), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["name", "path_prefix"]], + ) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + module.exit_json(changed=False, iam_instance_profiles=describe_iam_instance_profiles(module, client)) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/iam_user.py b/plugins/modules/iam_user.py index 75d412a6bc9..f4f1483cdde 100644 --- a/plugins/modules/iam_user.py +++ b/plugins/modules/iam_user.py @@ -182,8 +182,9 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @@ -208,7 +209,7 @@ def convert_friendly_names_to_arns(connection, module, policy_names): # List comprehension that looks for any policy in the 'policy_names' list # that does not begin with 'arn'. If there aren't any, short circuit. 
# If there are, translate friendly name to the full arn - if not any(not policy.startswith("arn:") for policy in policy_names if policy is not None): + if all(validate_aws_arn(policy, service="iam") for policy in policy_names if policy is not None): return policy_names allpolicies = {} paginator = connection.get_paginator("list_policies") @@ -218,7 +219,7 @@ def convert_friendly_names_to_arns(connection, module, policy_names): allpolicies[policy["PolicyName"]] = policy["Arn"] allpolicies[policy["Arn"]] = policy["Arn"] try: - return [allpolicies[policy] for policy in policy_names] + return [allpolicies[policy] for policy in policy_names if policy is not None] except KeyError as e: module.fail_json(msg="Couldn't find policy: " + str(e)) diff --git a/plugins/modules/lambda_execute.py b/plugins/modules/lambda_execute.py index 7f4b7aea109..10d38d1f59d 100644 --- a/plugins/modules/lambda_execute.py +++ b/plugins/modules/lambda_execute.py @@ -262,14 +262,10 @@ def main(): "Function executed, but there was an error in the Lambda function. " "Message: {errmsg}, Type: {type}, Stack Trace: {trace}" ) + error_data = { # format the stacktrace sent back as an array into a multiline string - "trace": "\n".join( - [ - " ".join([str(x) for x in line]) # cast line numbers to strings - for line in results.get("output", {}).get("stackTrace", []) - ] - ), + "trace": "\n".join(results.get("output", {}).get("stackTrace", [])), "errmsg": results["output"].get("errorMessage"), "type": results["output"].get("errorType"), } diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index ab80af2df13..6607f21dc9b 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -24,11 +24,11 @@ # General module options state: description: - - Whether the snapshot should exist or not. I(rebooted) is not idempotent and will leave the DB instance in a running state + - Desired state of the RDS Instance. + - I(state=rebooted) is not idempotent and will leave the DB instance in a running state and start it prior to rebooting if it was stopped. I(present) will leave the DB instance in the current running/stopped state, (running if creating the DB instance). - - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). Note - rebooting the instance - is not idempotent. + - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'] default: 'present' type: str @@ -250,7 +250,7 @@ master_user_password: description: - An 8-41 character password for the master database user. The password can contain any printable ASCII character - except C(/), C("), or C(@). To modify the password use I(force_update_password). Use I(apply immediately) to change + except C(/), C("), or C(@). To modify the password use I(force_update_password). Use I(apply_immediately) to change the password immediately, otherwise it is updated during the next maintenance window. aliases: - password diff --git a/plugins/modules/s3_object.py b/plugins/modules/s3_object.py index 84f2c0c7b5c..8f36df398f0 100644 --- a/plugins/modules/s3_object.py +++ b/plugins/modules/s3_object.py @@ -228,11 +228,19 @@ type: str description: - key name of the source object. - required: true + - if not specified, all the objects of the I(copy_src.bucket) will be copied into the specified bucket. 
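+ - when omitted, the top-level I(object) parameter must also be omitted and each source key is copied to a key of the same name in the destination bucket.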
+ required: false version_id: type: str description: - version ID of the source object. + prefix: + description: + - Copy all the keys that begin with the specified prefix. + - Ignored if I(copy_src.object) is supplied. + default: "" + type: str + version_added: 6.2.0 validate_bucket_name: description: - Whether the bucket name should be validated to conform to AWS S3 naming rules. @@ -370,6 +378,14 @@ copy_src: bucket: srcbucket object: /source/key.txt + +- name: Copy all the objects with name starting with 'ansible_' + amazon.aws.s3_object: + bucket: mybucket + mode: copy + copy_src: + bucket: srcbucket + prefix: 'ansible_' """ RETURN = r""" @@ -566,7 +582,7 @@ def paginated_versioned_list_with_fallback(s3, **pagination_params): yield [{"Key": key}] -def list_keys(module, s3, bucket, prefix, marker, max_keys): +def list_keys(s3, bucket, prefix=None, marker=None, max_keys=None): pagination_params = { "Bucket": bucket, "Prefix": prefix, @@ -576,8 +592,7 @@ def list_keys(module, s3, bucket, prefix, marker, max_keys): pagination_params = {k: v for k, v in pagination_params.items() if v} try: - keys = list(paginated_list(s3, **pagination_params)) - module.exit_json(msg="LIST operation complete", s3_keys=keys) + return list(paginated_list(s3, **pagination_params)) except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, @@ -892,78 +907,6 @@ def put_download_url(s3, bucket, obj, expiry): return url -def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag): - if module.check_mode: - module.exit_json(msg="COPY operation skipped - running in check mode", changed=True) - try: - params = {"Bucket": bucket, "Key": obj} - bucketsrc = { - "Bucket": module.params["copy_src"].get("bucket"), - "Key": module.params["copy_src"].get("object"), - } - version = None - if module.params["copy_src"].get("version_id"): - version = module.params["copy_src"].get("version_id") - bucketsrc.update({"VersionId": version}) - if not key_check( - module, - s3, - bucketsrc["Bucket"], - bucketsrc["Key"], - version=version, - validate=validate, - ): - # Key does not exist in source bucket - module.exit_json( - msg=f"Key {bucketsrc['Key']} does not exist in bucket {bucketsrc['Bucket']}.", - changed=False, - ) - - s_etag = get_etag(s3, bucketsrc["Bucket"], bucketsrc["Key"], version=version) - if s_etag == d_etag: - # Tags - tags, changed = ensure_tags(s3, module, bucket, obj) - if not changed: - module.exit_json( - msg="ETag from source and destination are the same", - changed=False, - ) - else: - module.exit_json( - msg="tags successfully updated.", - changed=changed, - tags=tags, - ) - else: - params.update({"CopySource": bucketsrc}) - params.update( - get_extra_params( - encrypt, - module.params.get("encryption_mode"), - module.params.get("encryption_kms_key_id"), - metadata, - ) - ) - s3.copy_object(aws_retry=True, **params) - put_object_acl(module, s3, bucket, obj) - # Tags - tags, changed = ensure_tags(s3, module, bucket, obj) - module.exit_json( - msg=f"Object copied from bucket {bucketsrc['Bucket']} to bucket {bucket}.", - tags=tags, - changed=True, - ) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - boto3.exceptions.Boto3Error, - ) as e: # pylint: disable=duplicate-except - raise S3ObjectFailure( - f"Failed while copying object {obj} from bucket {module.params['copy_src'].get('Bucket')}.", - e, - ) - - def get_current_object_tags_dict(module, s3, bucket, obj, version=None): try: if version: @@ -1230,8 +1173,7 @@ def 
s3_object_do_delobj(module, connection, connection_v4, s3_vars): def s3_object_do_list(module, connection, connection_v4, s3_vars): # If the bucket does not exist then bail out - list_keys( - module, + keys = list_keys( connection, s3_vars["bucket"], s3_vars["prefix"], @@ -1239,6 +1181,8 @@ def s3_object_do_list(module, connection, connection_v4, s3_vars): s3_vars["max_keys"], ) + module.exit_json(msg="LIST operation complete", s3_keys=keys) + def s3_object_do_create(module, connection, connection_v4, s3_vars): # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified @@ -1330,23 +1274,132 @@ def s3_object_do_getstr(module, connection, connection_v4, s3_vars): module.fail_json(msg=f"Key {s3_vars['object']} does not exist.") +def check_object_tags(module, connection, bucket, obj): + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + diff = False + if tags: + current_tags_dict = get_current_object_tags_dict(module, connection, bucket, obj) + if not purge_tags: + # Ensure existing tags that aren't updated by desired tags remain + current_tags_dict.update(tags) + diff = current_tags_dict != tags + return diff + + +def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, src_bucket, src_obj, versionId=None): + try: + params = {"Bucket": bucket, "Key": obj} + if not key_check(module, s3, src_bucket, src_obj, version=versionId, validate=validate): + # Key does not exist in source bucket + module.exit_json( + msg=f"Key {src_obj} does not exist in bucket {src_bucket}.", + changed=False, + ) + + s_etag = get_etag(s3, src_bucket, src_obj, version=versionId) + d_etag = get_etag(s3, bucket, obj) + if s_etag == d_etag: + if module.check_mode: + changed = check_object_tags(module, s3, bucket, obj) + result = {} + if changed: + result.update({"msg": "Would have update object tags is not running in check mode."}) + return changed, result + + # Ensure tags + tags, changed = ensure_tags(s3, module, bucket, obj) + result = {"msg": "ETag from source and destination are the same"} + if changed: + result = {"msg": "tags successfully updated.", "tags": tags} + return changed, result + elif module.check_mode: + return True, {"msg": "ETag from source and destination differ"} + else: + changed = True + bucketsrc = { + "Bucket": src_bucket, + "Key": src_obj, + } + if versionId: + bucketsrc.update({"VersionId": versionId}) + params.update({"CopySource": bucketsrc}) + params.update( + get_extra_params( + encrypt, + module.params.get("encryption_mode"), + module.params.get("encryption_kms_key_id"), + metadata, + ) + ) + s3.copy_object(aws_retry=True, **params) + put_object_acl(module, s3, bucket, obj) + # Tags + tags, tags_updated = ensure_tags(s3, module, bucket, obj) + msg = f"Object copied from bucket {bucketsrc['Bucket']} to bucket {bucket}." 
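+ # The copy above already set changed=True, so the tags_updated flag from ensure_tags() does not alter the return value; the final tag state is reported via 'tags'.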
+ return changed, {"msg": msg, "tags": tags} + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure( + f"Failed while copying object {obj} from bucket {module.params['copy_src'].get('Bucket')}.", + e, + ) + + def s3_object_do_copy(module, connection, connection_v4, s3_vars): - # if copying an object in a bucket yet to be created, acls for the bucket and/or the object may be specified - # these were separated into the variables bucket_acl and object_acl above - d_etag = get_etag(connection, s3_vars["bucket"], s3_vars["object"]) - if not s3_vars["acl_disabled"]: - # only use valid object acls for the copy operation - s3_vars["permission"] = s3_vars["object_acl"] - copy_object_to_bucket( - module, - connection, - s3_vars["bucket"], - s3_vars["object"], - s3_vars["encrypt"], - s3_vars["metadata"], - s3_vars["validate"], - d_etag, - ) + copy_src = module.params.get("copy_src") + if not copy_src.get("object") and s3_vars["object"]: + module.fail_json( + msg="A destination object was specified while trying to copy all the objects from the source bucket." + ) + src_bucket = copy_src.get("bucket") + if not copy_src.get("object"): + # copy recursively object(s) from source bucket to destination bucket + # list all the objects from the source bucket + keys = list_keys(connection, src_bucket, copy_src.get("prefix")) + if len(keys) == 0: + module.exit_json(msg=f"No object found to be copied from source bucket {src_bucket}.") + + changed = False + number_keys_updated = 0 + for key in keys: + updated, result = copy_object_to_bucket( + module, + connection, + s3_vars["bucket"], + key, + s3_vars["encrypt"], + s3_vars["metadata"], + s3_vars["validate"], + src_bucket, + key, + versionId=copy_src.get("version_id"), + ) + changed |= updated + number_keys_updated += 1 if updated else 0 + + msg = "object(s) from buckets '{0}' and '{1}' are the same.".format(src_bucket, s3_vars["bucket"]) + if number_keys_updated: + msg = "{0} copied into bucket '{1}'".format(number_keys_updated, s3_vars["bucket"]) + module.exit_json(changed=changed, msg=msg) + else: + # copy single object from source bucket into destination bucket + changed, result = copy_object_to_bucket( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["encrypt"], + s3_vars["metadata"], + s3_vars["validate"], + src_bucket, + copy_src.get("object"), + versionId=copy_src.get("version_id"), + ) + module.exit_json(changed=changed, **result) def populate_params(module): @@ -1459,7 +1512,8 @@ def main(): type="dict", options=dict( bucket=dict(required=True), - object=dict(required=True), + object=dict(), + prefix=dict(default=""), version_id=dict(), ), ), diff --git a/pyproject.toml b/pyproject.toml index 34fb52ea61b..b78e8bd0e3c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,3 +16,20 @@ src = [ "tests/unit", "tests/integration", ] + +[tool.isort] +profile = "black" +force_single_line = true +line_length = 120 + +src_paths = [ + "plugins", + "tests/unit", + "tests/integration", +] + +sections=["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "ANSIBLE_CORE", "ANSIBLE_AMAZON_AWS", "ANSIBLE_COMMUNITY_AWS", "LOCALFOLDER"] +known_third_party=["botocore", "boto3"] +known_ansible_core=["ansible"] +known_ansible_amazon_aws=["ansible_collections.amazon.aws"] +known_ansible_community_aws=["ansible_collections.community.aws"] diff --git 
a/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml b/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml index bea9e82163f..456973ce417 100644 --- a/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml +++ b/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml @@ -474,8 +474,8 @@ # Target group names have max length of 32 characters - set_fact: - tg1_name: "{{ (resource_prefix + '-tg1' ) | regex_search('(.{1,32})$') }}" - tg2_name: "{{ (resource_prefix + '-tg2' ) | regex_search('(.{1,32})$') }}" + tg1_name: "ansible-test-{{tiny_prefix}}-asg-t1" + tg2_name: "ansible-test-{{tiny_prefix}}-asg-t2" - name: create target group 1 elb_target_group: name: '{{ tg1_name }}' diff --git a/tests/integration/targets/backup_plan/tasks/main.yml b/tests/integration/targets/backup_plan/tasks/main.yml index 082db390cd3..86e35019031 100644 --- a/tests/integration/targets/backup_plan/tasks/main.yml +++ b/tests/integration/targets/backup_plan/tasks/main.yml @@ -119,8 +119,8 @@ copy_actions: - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" lifecycle: - move_to_cold_storage_after_days: 90 delete_after_days: 300 + move_to_cold_storage_after_days: 90 tags: status: archive register: backup_plan_update_result @@ -137,6 +137,133 @@ - backup_plan_update_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules - backup_plan_update_result.tags != backup_plan_info.backup_plans[0].tags + - name: Get updated backup plan details + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info + + - name: Update backup plan without nested optional values in check mode + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 * ? * * *)" + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + tags: + status: archive + check_mode: true + register: check_mode_update_without_nested_optional_values_result + + - name: Verify backup plan update without nested optional values in check mode result + ansible.builtin.assert: + that: + - check_mode_update_without_nested_optional_values_result.exists is true + - check_mode_update_without_nested_optional_values_result.changed is true + - check_mode_update_without_nested_optional_values_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + + - name: Get backup plan details after update in check mode + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info_after_check_mode_update + + - name: Verify backup plan was not actually updated in check mode + ansible.builtin.assert: + that: + - backup_plan_info_after_check_mode_update.backup_plans[0] == backup_plan_info.backup_plans[0] + + - name: Update backup plan without nested optional values + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 * ? 
* * *)" + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + tags: + status: archive + register: update_without_nested_optional_values_result + + - name: Verify backup plan update without nested optional values result + ansible.builtin.assert: + that: + - update_without_nested_optional_values_result.exists is true + - update_without_nested_optional_values_result.changed is true + - update_without_nested_optional_values_result.backup_plan_id == backup_plan_info.backup_plans[0].backup_plan_id + - update_without_nested_optional_values_result.backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - update_without_nested_optional_values_result.creation_date != backup_plan_info.backup_plans[0].creation_date + - update_without_nested_optional_values_result.version_id != backup_plan_info.backup_plans[0].version_id + - update_without_nested_optional_values_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - update_without_nested_optional_values_result.tags == backup_plan_info.backup_plans[0].tags + + - name: Get updated backup plan details + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: updated_backup_plan_info + + - name: Verify backup plan was actually updated + ansible.builtin.assert: + that: + - updated_backup_plan_info.backup_plans[0].backup_plan_name == backup_plan_info.backup_plans[0].backup_plan_name + - updated_backup_plan_info.backup_plans[0].backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - updated_backup_plan_info.backup_plans[0].version_id != backup_plan_info.backup_plans[0].version_id + - updated_backup_plan_info.backup_plans[0].backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - updated_backup_plan_info.backup_plans[0].tags == backup_plan_info.backup_plans[0].tags + + - name: Update backup plan without nested optional values - idempotency + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 * ? 
* * *)" + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + tags: + status: archive + register: update_without_nested_optional_values_idempotency_result + + - name: Verify backup plan update without nested optional values idempotency result + ansible.builtin.assert: + that: + - update_without_nested_optional_values_idempotency_result.exists is true + - update_without_nested_optional_values_idempotency_result.changed is false + - update_without_nested_optional_values_idempotency_result.backup_plan_id == updated_backup_plan_info.backup_plans[0].backup_plan_id + - update_without_nested_optional_values_idempotency_result.backup_plan_arn == updated_backup_plan_info.backup_plans[0].backup_plan_arn + - update_without_nested_optional_values_idempotency_result.creation_date == updated_backup_plan_info.backup_plans[0].creation_date + - update_without_nested_optional_values_idempotency_result.version_id == updated_backup_plan_info.backup_plans[0].version_id + - update_without_nested_optional_values_idempotency_result.backup_plan.rules == updated_backup_plan_info.backup_plans[0].backup_plan.rules + - update_without_nested_optional_values_idempotency_result.tags == updated_backup_plan_info.backup_plans[0].tags + - name: Delete backup plan in check mode amazon.aws.backup_plan: backup_plan_name: "{{ backup_plan_name }}" diff --git a/tests/integration/targets/backup_selection/tasks/main.yml b/tests/integration/targets/backup_selection/tasks/main.yml index c29d738b3c3..aba34a2ba36 100644 --- a/tests/integration/targets/backup_selection/tasks/main.yml +++ b/tests/integration/targets/backup_selection/tasks/main.yml @@ -18,60 +18,36 @@ wait: true register: iam_role - # Wait for the role to be created - - pause: - seconds: 5 + - name: Wait for the role to be created + ansible.builtin.pause: + seconds: 8 - name: Create an AWS Backup vault for the plan to target amazon.aws.backup_vault: backup_vault_name: "{{ backup_vault_name }}" - register: _resutl_create_backup_vault + register: _result_create_backup_vault - name: Verify result ansible.builtin.assert: that: - - _resutl_create_backup_vault.changed - - # - name: Create an AWS Backup plan - # amazon.aws.backup_plan: - # backup_plan_name: "{{ backup_plan_name }}" - # rules: - # - RuleName: DailyBackups - # TargetBackupVaultName: "{{ backup_vault_name }}" - # ScheduleExpression: "cron(0 5 ? * * *)" - # StartWindowMinutes: 60 - # CompletionWindowMinutes: 1440 - # tags: - # environment: test - # register: _resutl_create_backup_plan - - # - name: Verify result - # ansible.builtin.assert: - # that: - # - _resutl_create_backup_plan.changed - - # - name: Get detailed information about the AWS Backup plan - # amazon.aws.backup_plan_info: - # backup_plan_names: - # - "{{ backup_plan_name }}" - # register: _result_backup_plan_info - - # - name: Verify result - # ansible.builtin.assert: - # that: - # - _result_backup_plan_info.backup_plans | length == 1 - - - name: Create an AWS Backup plan - command: aws backup create-backup-plan --backup-plan "{\"BackupPlanName\":\"{{ backup_plan_name }}\",\"Rules\":[{\"RuleName\":\"DailyBackups\",\"ScheduleExpression\":\"cron(0 5 ? 
* * *)\",\"StartWindowMinutes\":60,\"TargetBackupVaultName\":\"{{ backup_vault_name }}\",\"CompletionWindowMinutes\":1440,\"Lifecycle\":{\"DeleteAfterDays\":35}}]}" - environment: - AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" - AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: "{{ aws_region }}" + - _result_create_backup_vault.changed + + - name: Create an AWS Backup plan for the selection to target + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: DailyBackups + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 5 ? * * *)" + start_window_minutes: 60 + completion_window_minutes: 1440 + tags: + environment: test register: _result_create_backup_plan - - set_fact: - backup_plan_id: "{{ (_result_create_backup_plan.stdout | from_json).BackupPlanId }}" + - name: Set backup plan ID + ansible.builtin.set_fact: + backup_plan_id: "{{ _result_create_backup_plan.backup_plan_id }}" - name: Create an AWS Backup selection (check_mode) amazon.aws.backup_selection: @@ -82,6 +58,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "daily" + conditions: + string_like: + - condition_key: "aws:ResourceTag/environment" + condition_value: "prod*" check_mode: true register: _create_result_backup_selection @@ -99,6 +79,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "daily" + conditions: + string_like: + - condition_key: "aws:ResourceTag/environment" + condition_value: "prod*" register: _create_result_backup_selection - name: Verify result @@ -118,6 +102,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "daily" + conditions: + string_like: + - condition_key: "aws:ResourceTag/environment" + condition_value: "prod*" register: _create_result_backup_selection - name: Verify result @@ -155,6 +143,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "weekly" + conditions: + string_not_equals: + - condition_key: "aws:ResourceTag/environment" + condition_value: "dev" check_mode: true register: _modify_result_backup_selection @@ -172,6 +164,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "weekly" + conditions: + string_not_equals: + - condition_key: "aws:ResourceTag/environment" + condition_value: "dev" register: _modify_result_backup_selection - name: Verify result @@ -191,6 +187,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "weekly" + conditions: + string_not_equals: + - condition_key: "aws:ResourceTag/environment" + condition_value: "dev" register: _modify_result_backup_selection - name: Verify result @@ -260,19 +260,10 @@ state: absent ignore_errors: true - # - name: Delete AWS Backup plan created during this test - # amazon.aws.backup_plan: - # backup_plan_name: "{{ backup_plan_name }}" - # state: absent - # ignore_errors: true - - name: Delete AWS Backup plan created during this test - command: aws backup delete-backup-plan --backup-plan-id "{{ backup_plan_id }}" - environment: - AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" - AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: "{{ aws_region }}" + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + state: absent ignore_errors: true - name: Delete AWS Backup vault created during this test @@ -280,3 +271,9 @@ backup_vault_name: "{{ backup_vault_name }}" 
state: absent ignore_errors: true + + - name: Delete IAM role created during this test + community.aws.iam_role: + name: "{{ backup_iam_role_name }}" + state: absent + ignore_errors: true diff --git a/tests/integration/targets/backup_vault/tasks/main.yml b/tests/integration/targets/backup_vault/tasks/main.yml index 9397c60afc6..24f27b7d597 100644 --- a/tests/integration/targets/backup_vault/tasks/main.yml +++ b/tests/integration/targets/backup_vault/tasks/main.yml @@ -28,6 +28,7 @@ - backup_vault_result_check is changed - backup_vault_result_check.vault.backup_vault_name == backup_vault_name - backup_vault_result_check.vault.encryption_key_arn == "" + - backup_vault_result_check.vault.tags.environment == "dev" - name: Create an AWS Backup Vault amazon.aws.backup_vault: @@ -42,7 +43,8 @@ - backup_vault_result is changed - backup_vault_result.vault.backup_vault_name == backup_vault_name - backup_vault_result.vault.encryption_key_arn == key.key_arn - + - backup_vault_result.vault.tags.environment == "dev" + - name: Get backup vault info by passing the vault name amazon.aws.backup_vault_info: backup_vault_names: @@ -53,6 +55,7 @@ that: - vault_info.backup_vaults[0].backup_vault_name == backup_vault_result.vault.backup_vault_name - vault_info.backup_vaults[0].backup_vault_arn == backup_vault_result.vault.backup_vault_arn + - vault_info.backup_vaults[0].tags.environment == "dev" - name: Create an AWS Backup Vault - idempotency check amazon.aws.backup_vault: @@ -67,7 +70,180 @@ - backup_vault_result_idem is not changed - backup_vault_result_idem.vault.backup_vault_name == backup_vault_name - backup_vault_result_idem.vault.encryption_key_arn == key.key_arn - + - backup_vault_result_idem.vault.tags.environment == "dev" + + - name: Update AWS Backup Vault - check mode + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + owner: ansible + purge_tags: false + check_mode: true + register: backup_vault_update_check_mode_result + + - name: Verify check mode update result + assert: + that: + - backup_vault_update_check_mode_result is changed + - backup_vault_update_check_mode_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_check_mode_result.vault.encryption_key_arn == key.key_arn + - backup_vault_update_check_mode_result.vault.tags.environment == "dev" + - backup_vault_update_check_mode_result.vault.tags.owner == "ansible" + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: update_check_mode_vault_info + + - name: Verify backup vault was not updated in check mode + ansible.builtin.assert: + that: + - update_check_mode_vault_info.backup_vaults[0].backup_vault_name == vault_info.backup_vaults[0].backup_vault_name + - update_check_mode_vault_info.backup_vaults[0].encryption_key_arn == vault_info.backup_vaults[0].encryption_key_arn + - update_check_mode_vault_info.backup_vaults[0].backup_vault_arn == vault_info.backup_vaults[0].backup_vault_arn + - update_check_mode_vault_info.backup_vaults[0].tags == vault_info.backup_vaults[0].tags + + - name: Update AWS Backup Vault + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + owner: ansible + purge_tags: false + register: backup_vault_update_result + + - name: Verify update result + ansible.builtin.assert: + that: + - backup_vault_update_result is changed + - backup_vault_update_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_result.vault.encryption_key_arn == key.key_arn 
+ - backup_vault_update_result.vault.tags.environment == "dev" + - backup_vault_update_check_mode_result.vault.tags.owner == "ansible" + + - name: Get updated backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_vault_info + + - name: Verify backup vault was updated + ansible.builtin.assert: + that: + - updated_vault_info.backup_vaults[0].backup_vault_name == vault_info.backup_vaults[0].backup_vault_name + - updated_vault_info.backup_vaults[0].backup_vault_arn == vault_info.backup_vaults[0].backup_vault_arn + - updated_vault_info.backup_vaults[0].encryption_key_arn == vault_info.backup_vaults[0].encryption_key_arn + - updated_vault_info.backup_vaults[0].tags != vault_info.backup_vaults[0].tags + + - name: Update AWS Backup Vault - idempotency + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + owner: ansible + purge_tags: false + register: backup_vault_update_idempotency_result + + - name: Verify idempotency update result + ansible.builtin.assert: + that: + - backup_vault_update_idempotency_result is not changed + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_vault_info_idempotency + + - name: Verify backup vault was not updated + ansible.builtin.assert: + that: + - updated_vault_info_idempotency.backup_vaults[0].backup_vault_name == updated_vault_info.backup_vaults[0].backup_vault_name + - updated_vault_info_idempotency.backup_vaults[0].backup_vault_arn == updated_vault_info.backup_vaults[0].backup_vault_arn + - updated_vault_info_idempotency.backup_vaults[0].encryption_key_arn == updated_vault_info.backup_vaults[0].encryption_key_arn + - updated_vault_info_idempotency.backup_vaults[0].tags == updated_vault_info.backup_vaults[0].tags + + - name: Update tags with purge - check mode + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + environment: test + purge_tags: true + check_mode: true + register: backup_vault_update_tags_check_mode_result + + - name: Verify check mode tag update result + ansible.builtin.assert: + that: + - backup_vault_update_tags_check_mode_result is changed + - backup_vault_update_tags_check_mode_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_tags_check_mode_result.vault.tags | length == 1 + - backup_vault_update_tags_check_mode_result.vault.tags.environment == "test" + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: update_tags_check_mode_info + + - name: Verify backup vault tags were not updated in check mode + ansible.builtin.assert: + that: + - update_tags_check_mode_info.backup_vaults[0].backup_vault_name == updated_vault_info.backup_vaults[0].backup_vault_name + - update_tags_check_mode_info.backup_vaults[0].tags == updated_vault_info.backup_vaults[0].tags + + - name: Update tags with purge + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + environment: test + purge_tags: true + register: backup_vault_update_tags_result + + - name: Verify update tags with purge result + ansible.builtin.assert: + that: + - backup_vault_update_tags_result is changed + - backup_vault_update_tags_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_tags_result.vault.tags | length == 1 + - backup_vault_update_tags_result.vault.tags.environment == "test" + + - name: Get backup vault info + 
amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_tags_info + + - name: Verify backup vault tags were updated + ansible.builtin.assert: + that: + - updated_tags_info.backup_vaults[0].backup_vault_name == updated_vault_info.backup_vaults[0].backup_vault_name + - updated_tags_info.backup_vaults[0].tags != updated_vault_info.backup_vaults[0].tags + + - name: Update tags with purge - idempotency + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + environment: test + purge_tags: true + register: backup_vault_update_tags_idempotency_result + + - name: Verify update tags with purge idempotency result + ansible.builtin.assert: + that: + - backup_vault_update_tags_idempotency_result is not changed + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_tags_idempotency_info + + - name: Verify no changes were made + ansible.builtin.assert: + that: + - updated_tags_idempotency_info.backup_vaults[0].backup_vault_name == updated_tags_info.backup_vaults[0].backup_vault_name + - updated_tags_idempotency_info.backup_vaults[0].tags == updated_tags_info.backup_vaults[0].tags + always: - name: Delete AWS Backup Vault created during this test amazon.aws.backup_vault: diff --git a/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml b/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml index 0047831a7ba..9243742d786 100644 --- a/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml +++ b/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml @@ -52,6 +52,27 @@ that: - event_rule_input_transformer_output.changed + - name: Create cloudwatch event rule with input transformer (idempotent) + cloudwatchevent_rule: + name: "{{ input_transformer_event_name }}" + description: "Event rule with input transformer configuration" + state: present + event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' + targets: + - id: "{{ sns_topic_output.sns_topic.name }}" + arn: "{{ sns_topic_output.sns_topic.topic_arn }}" + input_transformer: + input_paths_map: + instance: "$.detail.instance-id" + state: "$.detail.state" + input_template: " is in state " + register: event_rule_input_transformer_output + + - name: Assert that no changes were made to the rule + assert: + that: + - event_rule_input_transformer_output is not changed + - name: Create cloudwatch event rule with inputs cloudwatchevent_rule: name: "{{ input_event_name }}" diff --git a/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml b/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml index 74943a3fdbf..7aa2c196067 100644 --- a/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml +++ b/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml @@ -60,6 +60,39 @@ - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1' + - name: "Add a second interface (check_mode=true)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc" + network: + interfaces: + - id: "{{ eni_a.interface.id }}" + - id: "{{ eni_b.interface.id }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + register: add_interface_check_mode 
+ check_mode: true + + - name: Validate task reported changed + assert: + that: + - add_interface_check_mode is changed + + - name: "Gather {{ resource_prefix }}-test-eni-vpc info" + ec2_instance_info: + filters: + "tag:Name": '{{ resource_prefix }}-test-eni-vpc' + register: in_test_vpc_instance + + - name: Validate that only 1 ENI is attached to instance as we run using check_mode=true + assert: + that: + - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' + - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1' + - name: "Add a second interface" ec2_instance: state: present @@ -75,9 +108,25 @@ wait: false register: add_interface until: add_interface is not failed - ignore_errors: yes + ignore_errors: true retries: 10 + - name: Validate that the instance has now 2 interfaces attached + block: + - name: "Gather {{ resource_prefix }}-test-eni-vpc info" + ec2_instance_info: + filters: + "tag:Name": '{{ resource_prefix }}-test-eni-vpc' + register: in_test_vpc_instance + + - name: Validate that only 1 ENI is attached to instance as we run using check_mode=true + assert: + that: + - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' + - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 2' + + when: add_interface is successful + - name: "Make instance in the testing subnet created in the test VPC(check mode)" ec2_instance: state: present diff --git a/tests/integration/targets/ec2_instance_info/aliases b/tests/integration/targets/ec2_instance_info/aliases new file mode 100644 index 00000000000..704e2295951 --- /dev/null +++ b/tests/integration/targets/ec2_instance_info/aliases @@ -0,0 +1,4 @@ +time=1m +cloud/aws +ec2_instance_info +ec2_instance diff --git a/tests/integration/targets/ec2_instance_info/defaults/main.yml b/tests/integration/targets/ec2_instance_info/defaults/main.yml new file mode 100644 index 00000000000..5ec6ddfc542 --- /dev/null +++ b/tests/integration/targets/ec2_instance_info/defaults/main.yml @@ -0,0 +1,7 @@ +--- +ec2_instance_type: 't2.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-info' +ec2_instance_name: "{{ resource_prefix }}-test-instance-info" +ec2_instance_user_data: | + packages: + - httpd diff --git a/tests/integration/targets/ec2_instance_info/meta/main.yml b/tests/integration/targets/ec2_instance_info/meta/main.yml new file mode 100644 index 00000000000..aefa59ca473 --- /dev/null +++ b/tests/integration/targets/ec2_instance_info/meta/main.yml @@ -0,0 +1,4 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env diff --git a/tests/integration/targets/ec2_instance_info/tasks/main.yml b/tests/integration/targets/ec2_instance_info/tasks/main.yml new file mode 100644 index 00000000000..2e3aba8095f --- /dev/null +++ b/tests/integration/targets/ec2_instance_info/tasks/main.yml @@ -0,0 +1,76 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "Make instance in the testing subnet created in the test VPC" + ec2_instance: + state: present + name: "{{ ec2_instance_name }}" + image_id: "{{ ec2_ami_id }}" + availability_zone: '{{ subnet_b_az }}' + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + user_data: "{{ ec2_instance_user_data }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + + - name: "Gather {{ 
ec2_instance_name }} info" + ec2_instance_info: + filters: + "tag:Name": "{{ ec2_instance_name }}" + include_attributes: + - instanceType + - kernel + - ramdisk + - userData + - disableApiTermination + - instanceInitiatedShutdownBehavior + - rootDeviceName + - blockDeviceMapping + - productCodes + - sourceDestCheck + - groupSet + - ebsOptimized + - sriovNetSupport + - enclaveOptions + register: _instance_info + + - name: Validate that returned value contains required attributes + assert: + that: + - _instance_info.instances | length > 0 + - '"attributes" in _instance_info.instances[0]' + # instance type + - _instance_info.instances[0].attributes.instance_type.value == ec2_instance_type + # User data + - _instance_info.instances[0].attributes.user_data.value | b64decode == ec2_instance_user_data + # kernel + - '"kernel_id" in _instance_info.instances[0].attributes' + # Ram disk + - '"ramdisk_id" in _instance_info.instances[0].attributes' + # Disable API termination + - not (_instance_info.instances[0].attributes.disable_api_termination.value | bool) + # Instance Initiated Shutdown Behavior + - '"instance_initiated_shutdown_behavior" in _instance_info.instances[0].attributes' + # Root Device Name + - _instance_info.instances[0].attributes.root_device_name.value == "/dev/sda1" + # Block Device Mapping + - '"block_device_mappings" in _instance_info.instances[0].attributes' + - _instance_info.instances[0].attributes.block_device_mappings[0].device_name == "/dev/sda1" + - '"ebs" in _instance_info.instances[0].attributes.block_device_mappings[0]' + # Product Codes + - '"product_codes" in _instance_info.instances[0].attributes' + # Source Dest Check + - _instance_info.instances[0].attributes.source_dest_check.value | bool + # GroupSet + - '"groups" in _instance_info.instances[0].attributes' + # Ebs Optimized + - not (_instance_info.instances[0].attributes.ebs_optimized.value | bool) + # Sriov Net Support + - '"sriov_net_support" in _instance_info.instances[0].attributes' + # Enclave Options + - not (_instance_info.instances[0].attributes.enclave_options.enabled | bool) diff --git a/tests/integration/targets/ec2_security_group/tasks/main.yml b/tests/integration/targets/ec2_security_group/tasks/main.yml index c4db855d2b7..0f1b5fe433f 100644 --- a/tests/integration/targets/ec2_security_group/tasks/main.yml +++ b/tests/integration/targets/ec2_security_group/tasks/main.yml @@ -32,15 +32,15 @@ Description: "Created by ansible-test" register: vpc_result #TODO(ryansb): Update CI for VPC peering permissions - #- include: ./multi_account.yml - - include: ./diff_mode.yml - - include: ./numeric_protos.yml - - include: ./rule_group_create.yml - - include: ./egress_tests.yml - - include: ./icmp_verbs.yml - - include: ./data_validation.yml - - include: ./multi_nested_target.yml - - include: ./group_info.yml + #- include_tasks: ./multi_account.yml + - include_tasks: ./diff_mode.yml + - include_tasks: ./numeric_protos.yml + - include_tasks: ./rule_group_create.yml + - include_tasks: ./egress_tests.yml + - include_tasks: ./icmp_verbs.yml + - include_tasks: ./data_validation.yml + - include_tasks: ./multi_nested_target.yml + - include_tasks: ./group_info.yml # ============================================================ - name: test state=absent (CHECK MODE) @@ -137,7 +137,7 @@ # ============================================================ - name: tests IPv6 with the default VPC - include: ./ipv6_default_tests.yml + include_tasks: ./ipv6_default_tests.yml when: default_vpc - name: test IPv6 with a specified VPC diff 
--git a/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml b/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml index 501cccaf9b0..d0b519d3d1e 100644 --- a/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml +++ b/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml @@ -411,6 +411,47 @@ - create_ngw.vpc_id == vpc_id + # ============================================================ + - name: Create new NAT gateway when eip_address is invalid and create_default is true + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + eip_address: "192.0.2.1" + state: present + wait: yes + default_create: true + register: _nat_gateway + + - name: + assert: + that: + - _nat_gateway.changed + - '"create_time" in _nat_gateway' + - '"nat_gateway_addresses" in _nat_gateway' + - '"nat_gateway_id" in _nat_gateway' + - _nat_gateway.nat_gateway_id.startswith("nat-") + - '"state" in _nat_gateway' + - _nat_gateway.state == 'available' + - '"subnet_id" in _nat_gateway' + - _nat_gateway.subnet_id == subnet_id + - '"tags" in _nat_gateway' + - '"vpc_id" in _nat_gateway' + - _nat_gateway.vpc_id == vpc_id + + - name: Fail when eip_address is invalid and create_default is false + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + eip_address: "192.0.2.1" + state: present + wait: yes + register: _fail_nat_gateway + ignore_errors: true + + - name: Assert fail because eip_address is invalid + assert: + that: + _fail_nat_gateway.msg == "EIP 192.0.2.1 does not exist" + + # ============================================================ - name: Fetch NAT gateway by ID (list) ec2_vpc_nat_gateway_info: @@ -919,6 +960,7 @@ - create_ngw.changed - create_ngw.connectivity_type == 'private' - '"create_time" in create_ngw' + - '"allocation_id" not in create_ngw.nat_gateway_addresses[0]' - name: 'set facts: NAT gateway ID' set_fact: diff --git a/tests/integration/targets/iam_instance_profile/aliases b/tests/integration/targets/iam_instance_profile/aliases new file mode 100644 index 00000000000..e381149ffed --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/aliases @@ -0,0 +1,3 @@ +cloud/aws + +iam_instance_profile_info diff --git a/tests/integration/targets/iam_instance_profile/defaults/main.yml b/tests/integration/targets/iam_instance_profile/defaults/main.yml new file mode 100644 index 00000000000..28290d57856 --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/defaults/main.yml @@ -0,0 +1,12 @@ +--- +test_profile: '{{ resource_prefix }}-iam-ip' +test_profile_complex: '{{ resource_prefix }}-iam-ip-complex' +test_role: '{{ resource_prefix }}-iam-ipr' +test_path: '/{{ resource_prefix }}-ip/' +safe_managed_policy: 'AWSDenyAll' + +test_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value diff --git a/tests/integration/targets/iam_instance_profile/files/deny-assume.json b/tests/integration/targets/iam_instance_profile/files/deny-assume.json new file mode 100644 index 00000000000..73e87715862 --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/files/deny-assume.json @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Principal": { "Service": "ec2.amazonaws.com" }, + "Effect": "Deny" + } + ] +} diff --git a/tests/integration/targets/iam_instance_profile/meta/main.yml b/tests/integration/targets/iam_instance_profile/meta/main.yml new file mode 100644 index 00000000000..32cf5dda7ed --- /dev/null +++ 
b/tests/integration/targets/iam_instance_profile/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git a/tests/integration/targets/iam_instance_profile/tasks/main.yml b/tests/integration/targets/iam_instance_profile/tasks/main.yml new file mode 100644 index 00000000000..f628e617117 --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/tasks/main.yml @@ -0,0 +1,510 @@ +--- +# Tests for iam_instance_profile and iam_instance_profile_info +# + +- name: "Setup AWS connection info" + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + - community.general + block: + # =================================================================== + # Prepare + + - name: "Prepare IAM Roles" + iam_role: + state: present + name: "{{ item }}" + path: "{{ test_path }}" + create_instance_profile: True + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + managed_policies: + - "{{ safe_managed_policy }}" + wait: True + loop: + - "{{ test_role }}" + - "{{ test_role }}-2" + + # =================================================================== + # Test + + # =================================================================== + + - name: "Create minimal Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Create minimal Instance Profile" + iam_instance_profile: + name: "{{ test_profile }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Create minimal Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Create minimal Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile }}" + register: profile_result + + - assert: + that: + - profile_result is not changed + + # =================================================================== + + - include_tasks: 'tags.yml' + + # =================================================================== + + - name: "Add role to Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Add role to Instance Profile" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Add role to Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Add role to Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + register: profile_result + + - assert: + that: + - profile_result is not changed + + # ===== + + - name: "Replace role on Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Replace role on Instance Profile" + iam_instance_profile: + name: "{{ test_profile }}" + 
role: "{{ test_role }}-2" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Replace role on Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Replace role on Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + register: profile_result + + - assert: + that: + - profile_result is not changed + + # ===== + + - name: "Remove role from Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Remove role from Instance Profile" + iam_instance_profile: + name: "{{ test_profile }}" + role: "" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Remove role from Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Remove role from Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile }}" + role: "" + register: profile_result + + - assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: "Create complex Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Create complex Instance Profile" + iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Create complex Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Create complex Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + register: profile_result + + - assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: "List all Instance Profiles (no filter)" + iam_instance_profile_info: + register: profile_info + + - assert: + that: + - profile_info.iam_instance_profiles | length >= 4 + - '"{{ test_role }}" in profile_names' + - '"{{ test_role }}-2" in profile_names' + - '"{{ test_profile }}" in profile_names' + - '"{{ test_profile_complex }}" in profile_names' + + - '"arn" in complex_profile' + - '"create_date" in complex_profile' + - '"instance_profile_id" in complex_profile' + - '"instance_profile_name" in complex_profile' + - complex_profile.instance_profile_name == test_profile_complex + - '"path" in complex_profile' + - complex_profile.path == test_path + - '"roles" in complex_profile' + - complex_profile.roles | length == 1 + - '"arn" in complex_profile.roles[0]' + - 
'"assume_role_policy_document" in complex_profile.roles[0]' + - '"create_date" in complex_profile.roles[0]' + - '"path" in complex_profile.roles[0]' + - complex_profile.roles[0].path == test_path + - '"role_id" in complex_profile.roles[0]' + - '"role_name" in complex_profile.roles[0]' + - complex_profile.roles[0].role_name == "{{ test_role }}-2" + vars: + profile_names: '{{ profile_info.iam_instance_profiles | map(attribute="instance_profile_name") }}' + complex_profile: '{{ profile_info.iam_instance_profiles | selectattr("instance_profile_name", "match", test_profile_complex) | first}}' + + - name: "List all Instance Profiles (filter by path)" + iam_instance_profile_info: + path: "{{ test_path }}" + register: profile_info + + - assert: + that: + - profile_info.iam_instance_profiles | length == 3 + - '"{{ test_role }}" in profile_names' + - '"{{ test_role }}-2" in profile_names' + - '"{{ test_profile_complex }}" in profile_names' + + - '"arn" in complex_profile' + - '"create_date" in complex_profile' + - '"instance_profile_id" in complex_profile' + - '"instance_profile_name" in complex_profile' + - complex_profile.instance_profile_name == test_profile_complex + - '"path" in complex_profile' + - complex_profile.path == test_path + - '"roles" in complex_profile' + - complex_profile.roles | length == 1 + - '"arn" in complex_profile.roles[0]' + - '"assume_role_policy_document" in complex_profile.roles[0]' + - '"create_date" in complex_profile.roles[0]' + - '"path" in complex_profile.roles[0]' + - complex_profile.roles[0].path == test_path + - '"role_id" in complex_profile.roles[0]' + - '"role_name" in complex_profile.roles[0]' + - complex_profile.roles[0].role_name == "{{ test_role }}-2" + vars: + profile_names: '{{ profile_info.iam_instance_profiles | map(attribute="instance_profile_name") }}' + complex_profile: '{{ profile_info.iam_instance_profiles | selectattr("instance_profile_name", "match", test_profile_complex) | first}}' + + - name: "List all Instance Profiles (filter by name - complex)" + iam_instance_profile_info: + name: "{{ test_profile_complex }}" + register: profile_info + + - assert: + that: + - profile_info.iam_instance_profiles | length == 1 + - '"{{ test_profile_complex }}" in profile_names' + + - '"arn" in complex_profile' + - '"create_date" in complex_profile' + - '"instance_profile_id" in complex_profile' + - '"instance_profile_name" in complex_profile' + - complex_profile.instance_profile_name == test_profile_complex + - '"path" in complex_profile' + - complex_profile.path == test_path + - '"tags" in complex_profile' + - complex_profile.tags == test_tags + - '"roles" in complex_profile' + - complex_profile.roles | length == 1 + - '"arn" in complex_profile.roles[0]' + - '"assume_role_policy_document" in complex_profile.roles[0]' + - '"create_date" in complex_profile.roles[0]' + - '"path" in complex_profile.roles[0]' + - complex_profile.roles[0].path == test_path + - '"role_id" in complex_profile.roles[0]' + - '"role_name" in complex_profile.roles[0]' + - complex_profile.roles[0].role_name == "{{ test_role }}-2" + - '"tags" in complex_profile.roles[0]' + - complex_profile.roles[0].tags == {} + vars: + profile_names: '{{ profile_info.iam_instance_profiles | map(attribute="instance_profile_name") }}' + complex_profile: '{{ profile_info.iam_instance_profiles | selectattr("instance_profile_name", "match", test_profile_complex) | first}}' + + - name: "List an Instance Profile (filter by name)" + iam_instance_profile_info: + name: "{{ test_profile }}" + register: profile_info + 
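      # The minimal profile was created with only a name, so the lookup below should report
      # the default path "/", an empty tag set, and no attached roles.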
+ - assert: + that: + - profile_info.iam_instance_profiles | length == 1 + - '"arn" in simple_profile' + - '"create_date" in simple_profile' + - '"instance_profile_id" in simple_profile' + - '"instance_profile_name" in simple_profile' + - simple_profile.instance_profile_name == test_profile + - '"path" in simple_profile' + - simple_profile.path == "/" + - '"tags" in simple_profile' + - simple_profile.tags == {} + - '"roles" in simple_profile' + - simple_profile.roles | length == 0 + vars: + simple_profile: '{{ profile_info.iam_instance_profiles[0] }}' + + # =================================================================== + + - name: "Delete minimal Instance Profile (CHECK)" + iam_instance_profile: + state: absent + name: "{{ test_profile }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Delete minimal Instance Profile" + iam_instance_profile: + state: absent + name: "{{ test_profile }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Delete minimal Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + state: absent + name: "{{ test_profile }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Delete minimal Instance Profile - Idempotent" + iam_instance_profile: + state: absent + name: "{{ test_profile }}" + register: profile_result + + - assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: "Delete complex Instance Profile (CHECK)" + iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Delete complex Instance Profile" + iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Delete complex Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Delete complex Instance Profile - Idempotent" + iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + register: profile_result + + - assert: + that: + - profile_result is not changed + + always: + # =================================================================== + # Cleanup + +# - name: "iam_instance_profile_info after Role deletion" +# iam_instance_profile_info: +# ignore_errors: true + + - name: "Delete Instance Profiles" + iam_instance_profile: + state: absent + name: "{{ item }}" + ignore_errors: true + loop: + - "{{ test_profile }}" + - "{{ test_profile_complex }}" + - "{{ test_role }}" + - "{{ test_role }}-2" + + - name: "Remove IAM Roles" + iam_role: + state: absent + name: "{{ item }}" + path: "{{ test_path }}" + delete_instance_profile: yes + ignore_errors: true + loop: + - "{{ test_role }}" + - "{{ test_role }}-2" + +# - name: "iam_role_info after Role deletion" +# iam_role_info: +# path: "{{ test_path }}" +# ignore_errors: true diff --git a/tests/integration/targets/iam_instance_profile/tasks/tags.yml b/tests/integration/targets/iam_instance_profile/tasks/tags.yml new file mode 100644 index 00000000000..5185301b8a4 --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/tasks/tags.yml @@ -0,0 +1,298 @@ +- vars: + first_tags: 
+ 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + second_tags: + 'New Key with Spaces': Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + third_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + final_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + module_defaults: + amazon.aws.iam_instance_profile: + name: '{{ test_profile }}' + amazon.aws.iam_instance_profile_info: + name: "{{ test_profile }}" + block: + + # ============================================================ + # + + - name: (check) add tags + iam_instance_profile: + tags: '{{ first_tags }}' + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is changed + + - name: add tags + iam_instance_profile: + tags: '{{ first_tags }}' + state: 'present' + register: tag_profile + + - name: get instance profile facts + iam_instance_profile_info: {} + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == first_tags + + - name: (check) add tags - IDEMPOTENCY + iam_instance_profile: + tags: '{{ first_tags }}' + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would not change + assert: + that: + - tag_profile is not changed + + - name: add tags - IDEMPOTENCY + iam_instance_profile: + tags: '{{ first_tags }}' + state: 'present' + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: {} + register: tag_profile_info + + - name: verify no change + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == first_tags + + # ============================================================ + + - name: (check) modify tags with purge + iam_instance_profile: + tags: '{{ second_tags }}' + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is changed + + - name: modify tags with purge + iam_instance_profile: + tags: '{{ second_tags }}' + state: 'present' + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == second_tags + + - name: (check) modify tags with purge - IDEMPOTENCY + iam_instance_profile: + tags: '{{ second_tags }}' + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would not change + assert: + that: + - tag_profile is not changed + + - name: modify tags with purge - IDEMPOTENCY + iam_instance_profile: + tags: '{{ second_tags }}' + state: 'present' + 
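      # Re-applying second_tags on its own must leave the tag set untouched (tags are purged
      # by default, so second_tags is already the full set); the later purge_tags: False
      # tasks instead merge third_tags on top of the existing keys, producing final_tags.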
register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == second_tags + + # ============================================================ + + - name: (check) modify tags without purge + iam_instance_profile: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is changed + + - name: modify tags without purge + iam_instance_profile: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == final_tags + + - name: (check) modify tags without purge - IDEMPOTENCY + iam_instance_profile: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_profile + check_mode: True + + - name: assert would not change + assert: + that: + - tag_profile is not changed + + - name: modify tags without purge - IDEMPOTENCY + iam_instance_profile: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == final_tags + + # ============================================================ + + - name: (check) No change to tags without setting tags + iam_instance_profile: + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is not changed + + - name: No change to tags without setting tags + iam_instance_profile: + state: 'present' + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == final_tags + + # ============================================================ + + - name: (check) remove all tags + iam_instance_profile: + tags: {} + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is changed + + - name: remove all tags + iam_instance_profile: + tags: {} + state: 'present' + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == {} + + - name: (check) remove all tags - IDEMPOTENCY + iam_instance_profile: + tags: {} + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would not change + assert: + 
that: + - tag_profile is not changed + + - name: remove all tags - IDEMPOTENCY + iam_instance_profile: + tags: {} + state: 'present' + register: tag_profile + - name: get instance profile + iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == {} diff --git a/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml b/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml index 98b6ba937ae..e411eac615f 100644 --- a/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml +++ b/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml @@ -8,4 +8,4 @@ security_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: - - include: ./test_{{ inventory_hostname }}.yml + - include_tasks: ./test_{{ inventory_hostname }}.yml diff --git a/tests/integration/targets/lambda/tasks/main.yml b/tests/integration/targets/lambda/tasks/main.yml index 58a0d7d4ff4..2fcadb1c42c 100644 --- a/tests/integration/targets/lambda/tasks/main.yml +++ b/tests/integration/targets/lambda/tasks/main.yml @@ -469,25 +469,29 @@ - lambda_infos_mappings.functions[0].policy is undefined - lambda_infos_mappings.functions[0].tags is undefined - # More Lambda update tests - - name: test state=present with all nullable variables explicitly set to null - lambda: - name: '{{lambda_function_name}}' - runtime: '{{ lambda_python_runtime }}' - role: '{{ lambda_role_name }}' - zip_file: '{{zip_res.dest}}' - handler: '{{ lambda_python_handler }}' - description: - vpc_subnet_ids: - vpc_security_group_ids: - environment_variables: - dead_letter_arn: - register: result - - name: assert lambda remains as before - assert: - that: - - result is not failed - - result.changed == False + # 2023-06-27 + # An explicit "None" is no longer permitted by Ansible for parameters with a type other than "raw" + # (None is still the implicit value for "not set") + # + # # More Lambda update tests + # - name: test state=present with all nullable variables explicitly set to null + # lambda: + # name: '{{lambda_function_name}}' + # runtime: '{{ lambda_python_runtime }}' + # role: '{{ lambda_role_name }}' + # zip_file: '{{zip_res.dest}}' + # handler: '{{ lambda_python_handler }}' + # description: + # vpc_subnet_ids: + # vpc_security_group_ids: + # environment_variables: + # dead_letter_arn: + # register: result + # - name: assert lambda remains as before + # assert: + # that: + # - result is not failed + # - result.changed == False - name: test putting an environment variable changes lambda (check mode) lambda: diff --git a/tests/integration/targets/s3_object/tasks/copy_recursively.yml b/tests/integration/targets/s3_object/tasks/copy_recursively.yml new file mode 100644 index 00000000000..1e31020d714 --- /dev/null +++ b/tests/integration/targets/s3_object/tasks/copy_recursively.yml @@ -0,0 +1,152 @@ +- name: Test copy recursively object from one bucket to another one. 
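  # With mode=copy and a copy_src that names only a bucket (no object key), every key in the
  # source bucket is copied; an optional copy_src.prefix restricts the copy to matching keys.
  # The tasks below also check that check_mode copies nothing and that re-running the copy is
  # idempotent.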
+ block: + - name: Create S3 bucket + s3_bucket: + name: "{{ item }}" + state: present + with_items: + - "{{ bucket_src }}" + - "{{ bucket_dst }}" + + - name: Create object into bucket + s3_object: + bucket: "{{ bucket_src }}" + mode: put + content: "{{ item.content }}" + object: "{{ item.object }}" + with_items: "{{ s3_objects }}" + + - name: Copy all objects from source bucket into destination bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + check_mode: true + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure no object were found into bucket + assert: + that: + - _objects.s3_keys | length == 0 + + # Test: Copy all objects using prefix + - name: copy object using prefix + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + prefix: "file" + register: _copy_with_prefix + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure objects with prefix 'file' were copied into bucket + assert: + that: + - _copy_with_prefix is changed + - _objects.s3_keys | length == 3 + - '"file1.txt" in _objects.s3_keys' + - '"file2.txt" in _objects.s3_keys' + - '"file3.txt" in _objects.s3_keys' + + # Test: Copy all objects using prefix (idempotency) + - name: copy object using prefix (idempotency) + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + prefix: "file" + register: _copy_with_prefix_idempotency + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure objects with prefix 'file' were copied into bucket + assert: + that: + - _copy_with_prefix_idempotency is not changed + - _objects.s3_keys | length == 3 + - '"file1.txt" in _objects.s3_keys' + - '"file2.txt" in _objects.s3_keys' + - '"file3.txt" in _objects.s3_keys' + + # Test: Copy all objects from source bucket + - name: copy all objects from source bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + register: _copy_all + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure all objects were copied into bucket + assert: + that: + - _copy_all is changed + - _objects.s3_keys | length == 5 + + # Test: Copy all objects from source bucket (idempotency) + - name: copy all objects from source bucket (idempotency) + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + register: _copy_all_idempotency + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure number of copied objects remains the same. 
+ assert: + that: + - _copy_all_idempotency is not changed + - _objects.s3_keys | length == 5 + + vars: + bucket_src: "{{ bucket_name }}-recursive-src" + bucket_dst: "{{ bucket_name }}-recursive-dst" + s3_objects: + - object: file1.txt + content: | + some content for file1.txt + - object: file2.txt + content: | + some content for file2.txt + - object: file3.txt + content: | + some content for file3.txt + - object: testfile.py + content: "This is a sample text file" + - object: another.txt + content: "another file to create into bucket" + + always: + - include_tasks: delete_bucket.yml + with_items: + - "{{ bucket_src }}" + - "{{ bucket_dst }}" diff --git a/tests/integration/targets/s3_object/tasks/main.yml b/tests/integration/targets/s3_object/tasks/main.yml index 5603ddc1ec1..c5006322545 100644 --- a/tests/integration/targets/s3_object/tasks/main.yml +++ b/tests/integration/targets/s3_object/tasks/main.yml @@ -1039,6 +1039,8 @@ - "'tags' in result" - (result.tags | length) == 0 + - include_tasks: copy_recursively.yml + always: - name: delete temporary files diff --git a/tests/unit/module_utils/arn/test_parse_aws_arn.py b/tests/unit/module_utils/arn/test_parse_aws_arn.py index 49375a855de..cc4b4057656 100644 --- a/tests/unit/module_utils/arn/test_parse_aws_arn.py +++ b/tests/unit/module_utils/arn/test_parse_aws_arn.py @@ -24,6 +24,8 @@ region="us-east-1", account_id="123456789012", resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", ), dict( partition="aws-gov", @@ -31,6 +33,8 @@ region="us-gov-east-1", account_id="123456789012", resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", ), dict( partition="aws-cn", @@ -38,6 +42,8 @@ region="us-east-1", account_id="123456789012", resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", ), # Start the account ID with 0s, it's a 12 digit *string*, if someone treats # it as an integer the leading 0s can disappear. 
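# The resource_type / resource_id expectations added throughout these cases follow a simple
# pattern: the raw "resource" field is split on its first "/" or ":" delimiter, and
# resource_type is left as None when no delimiter is present.  A rough, illustrative
# restatement of that rule (the collection's actual parser may differ in detail):
#
#     def split_resource(resource):
#         match = re.match(r"^([^:/]+)[:/](.+)$", resource)
#         if match:
#             return match.group(1), match.group(2)  # resource_type, resource_id
#         return None, resource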
@@ -47,21 +53,93 @@ region="us-east-1", account_id="000123000123", resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", ), # S3 doesn't "need" region/account_id as bucket names are globally unique - dict(partition="aws", service="s3", region="", account_id="", resource="bucket/object"), + dict( + partition="aws", + service="s3", + region="", + account_id="", + resource="bucket/object", + resource_type="bucket", + resource_id="object", + ), # IAM is a 'global' service, so the ARNs don't have regions - dict(partition="aws", service="iam", region="", account_id="123456789012", resource="policy/foo/bar/PolicyName"), dict( - partition="aws", service="iam", region="", account_id="123456789012", resource="instance-profile/ExampleProfile" + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="policy/foo/bar/PolicyName", + resource_type="policy", + resource_id="foo/bar/PolicyName", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="instance-profile/ExampleProfile", + resource_type="instance-profile", + resource_id="ExampleProfile", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="root", + resource_type=None, + resource_id="root", ), - dict(partition="aws", service="iam", region="", account_id="123456789012", resource="root"), # Some examples with different regions - dict(partition="aws", service="sqs", region="eu-west-3", account_id="123456789012", resource="example-queue"), - dict(partition="aws", service="sqs", region="us-gov-east-1", account_id="123456789012", resource="example-queue"), - dict(partition="aws", service="sqs", region="sa-east-1", account_id="123456789012", resource="example-queue"), - dict(partition="aws", service="sqs", region="ap-northeast-2", account_id="123456789012", resource="example-queue"), - dict(partition="aws", service="sqs", region="ca-central-1", account_id="123456789012", resource="example-queue"), + dict( + partition="aws", + service="sqs", + region="eu-west-3", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="us-gov-east-1", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="sa-east-1", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="ap-northeast-2", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="ca-central-1", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), # Some more unusual service names dict( partition="aws", @@ -69,6 +147,8 @@ region="us-east-1", account_id="123456789012", resource="stateful-rulegroup/ExampleDomainList", + resource_type="stateful-rulegroup", + resource_id="ExampleDomainList", ), dict( partition="aws", @@ -76,6 +156,8 @@ region="us-east-1", account_id="123456789012", resource="group/group-name", + resource_type="group", + resource_id="group-name", ), # A special case for resources AWS curate dict( @@ -84,22 +166,90 @@ region="us-east-1", account_id="aws-managed", 
resource="stateful-rulegroup/BotNetCommandAndControlDomainsActionOrder", + resource_type="stateful-rulegroup", + resource_id="BotNetCommandAndControlDomainsActionOrder", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="aws", + resource="policy/AWSDirectConnectReadOnlyAccess", + resource_type="policy", + resource_id="AWSDirectConnectReadOnlyAccess", ), - dict(partition="aws", service="iam", region="", account_id="aws", resource="policy/AWSDirectConnectReadOnlyAccess"), # Examples merged in from test_arn.py - dict(partition="aws-us-gov", service="iam", region="", account_id="0123456789", resource="role/foo-role"), - dict(partition="aws", service="iam", region="", account_id="123456789012", resource="user/dev/*"), - dict(partition="aws", service="iam", region="", account_id="123456789012", resource="user:test"), - dict(partition="aws-cn", service="iam", region="", account_id="123456789012", resource="user:test"), - dict(partition="aws", service="iam", region="", account_id="123456789012", resource="user"), - dict(partition="aws", service="s3", region="", account_id="", resource="my_corporate_bucket/*"), - dict(partition="aws", service="s3", region="", account_id="", resource="my_corporate_bucket/Development/*"), + dict( + partition="aws-us-gov", + service="iam", + region="", + account_id="0123456789", + resource="role/foo-role", + resource_type="role", + resource_id="foo-role", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="user/dev/*", + resource_type="user", + resource_id="dev/*", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="user:test", + resource_type="user", + resource_id="test", + ), + dict( + partition="aws-cn", + service="iam", + region="", + account_id="123456789012", + resource="user:test", + resource_type="user", + resource_id="test", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="user", + resource_type=None, + resource_id="user", + ), + dict( + partition="aws", + service="s3", + region="", + account_id="", + resource="my_corporate_bucket/*", + resource_type="my_corporate_bucket", + resource_id="*", + ), + dict( + partition="aws", + service="s3", + region="", + account_id="", + resource="my_corporate_bucket/Development/*", + resource_type="my_corporate_bucket", + resource_id="Development/*", + ), dict( partition="aws", service="rds", region="es-east-1", account_id="000000000000", resource="snapshot:rds:my-db-snapshot", + resource_type="snapshot", + resource_id="rds:my-db-snapshot", ), dict( partition="aws", @@ -107,6 +257,8 @@ region="us-east-1", account_id="012345678901", resource="changeSet/Ansible-StackName-c6884247ede41eb0", + resource_type="changeSet", + resource_id="Ansible-StackName-c6884247ede41eb0", ), ] diff --git a/tests/unit/module_utils/arn/test_validate_aws_arn.py b/tests/unit/module_utils/arn/test_validate_aws_arn.py new file mode 100644 index 00000000000..d730ee6372c --- /dev/null +++ b/tests/unit/module_utils/arn/test_validate_aws_arn.py @@ -0,0 +1,217 @@ +# (c) 2022 Red Hat Inc. 
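# Each case below is a tuple of (arn, expected_result, kwargs): validate_aws_arn() is
# expected to return True only when the ARN is well formed and every component supplied as
# a keyword argument matches the corresponding part of the ARN.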
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn + +arn_test_inputs = [ + # Just test it's a valid ARN + ("arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", True, None), + # Bad ARN + ("arn:was:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", False, None), + # Individual options + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"partition": "aws"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"partition": "aws-cn"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"service": "outposts"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"service": "iam"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"region": "us-east-1"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"region": "us-east-2"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"account_id": "123456789012"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"account_id": "111111111111"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"resource": "outpost/op-1234567890abcdef0"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"resource": "outpost/op-11111111111111111"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"resource_type": "outpost"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"resource_type": "notpost"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"resource_id": "op-1234567890abcdef0"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"resource_id": "op-11111111111111111"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + True, + {"resource_type": "stateMachine"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + False, + {"resource_type": "nopeMachine"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + True, + {"resource_id": "HelloWorldStateMachine"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + False, + {"resource_id": "CruelWorldStateMachine"}, + ), + # All options + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws-cn", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + 
"partition": "aws", + "service": "iam", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-2", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "111111111111", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-11111111111111111", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "notpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-11111111111111111", + }, + ), +] + + +@pytest.mark.parametrize("arn, result, kwargs", arn_test_inputs) +def test_validate_aws_arn(arn, result, kwargs): + kwargs = kwargs or {} + assert validate_aws_arn(arn, **kwargs) == result diff --git a/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py b/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py index 66bf66acecb..0bd9a284e81 100644 --- a/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py +++ b/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py @@ -51,7 +51,7 @@ def __init__(self): @pytest.fixture def ec2_instance(monkeypatch): - monkeypatch.setattr(ec2_instance_module, "parse_aws_arn", lambda arn: None) + monkeypatch.setattr(ec2_instance_module, "validate_aws_arn", lambda arn, service, resource_type: None) monkeypatch.setattr(ec2_instance_module, "module", MagicMock()) ec2_instance_module.module.fail_json.side_effect = FailJsonException() ec2_instance_module.module.fail_json_aws.side_effect = FailJsonException() @@ -60,7 +60,7 @@ def ec2_instance(monkeypatch): def test_determine_iam_role_arn(params_object, ec2_instance, monkeypatch): # Revert the default monkey patch to make it simple to try passing a valid ARNs - monkeypatch.setattr(ec2_instance, "parse_aws_arn", utils_arn.parse_aws_arn) + monkeypatch.setattr(ec2_instance, "validate_aws_arn", utils_arn.validate_aws_arn) # Simplest example, someone passes a valid instance profile ARN arn = ec2_instance.determine_iam_role("arn:aws:iam::123456789012:instance-profile/myprofile") diff --git a/tests/unit/plugins/modules/test_ec2_metadata_facts.py b/tests/unit/plugins/modules/test_ec2_metadata_facts.py index e43b00769e3..10c4a341adc 100644 --- 
a/tests/unit/plugins/modules/test_ec2_metadata_facts.py +++ b/tests/unit/plugins/modules/test_ec2_metadata_facts.py @@ -1,6 +1,7 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +import gzip import io import pytest from unittest.mock import MagicMock @@ -59,3 +60,41 @@ def test_fetch_recusive(m_fetch_url, ec2_instance): ] ec2_instance.fetch("http://169.254.169.254/latest/meta-data/") assert ec2_instance._data == {"http://169.254.169.254/latest/meta-data/whatever/my-key": "my-value"} + + +@patch(module_name + ".fetch_url") +def test__fetch_user_data_compressed(m_fetch_url, ec2_instance): + user_data = b"""Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: text/cloud-config +Mime-Version: 1.0 + +packages: ['httpie'] + +--MIMEBOUNDARY-- +""" + + m_fetch_url.return_value = (io.BytesIO(gzip.compress(user_data)), {"status": 200}) + assert ec2_instance._fetch("http://169.254.169.254/latest/user-data") == user_data.decode("utf-8") + + +@patch(module_name + ".fetch_url") +def test__fetch_user_data_plain(m_fetch_url, ec2_instance): + user_data = b"""Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: text/cloud-config +Mime-Version: 1.0 + +packages: ['httpie'] + +--MIMEBOUNDARY-- +""" + + m_fetch_url.return_value = (io.BytesIO(user_data), {"status": 200}) + assert ec2_instance._fetch("http://169.254.169.254/latest/user-data") == user_data.decode("utf-8") diff --git a/tests/unit/plugins/modules/test_s3_object.py b/tests/unit/plugins/modules/test_s3_object.py index c8d3fc4fcf3..863001335a3 100644 --- a/tests/unit/plugins/modules/test_s3_object.py +++ b/tests/unit/plugins/modules/test_s3_object.py @@ -17,26 +17,22 @@ @patch(module_name + ".paginated_list") def test_list_keys_success(m_paginated_list): - module = MagicMock() s3 = MagicMock() m_paginated_list.return_value = ["delete.txt"] - s3_object.list_keys(module, s3, "a987e6b6026ab04e4717", "", "", 1000) - - assert m_paginated_list.call_count == 1 - module.exit_json.assert_called_with(msg="LIST operation complete", s3_keys=["delete.txt"]) + assert ["delete.txt"] == s3_object.list_keys(s3, "a987e6b6026ab04e4717", "", "", 1000) + m_paginated_list.assert_called_once() @patch(module_name + ".paginated_list") def test_list_keys_failure(m_paginated_list): - module = MagicMock() s3 = MagicMock() m_paginated_list.side_effect = botocore.exceptions.BotoCoreError with pytest.raises(s3_object.S3ObjectFailure): - s3_object.list_keys(module, s3, "a987e6b6026ab04e4717", "", "", 1000) + s3_object.list_keys(s3, "a987e6b6026ab04e4717", "", "", 1000) @patch(module_name + ".delete_key")
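# The list_keys tests above exercise a refactored helper that no longer takes the module
# object and no longer calls exit_json itself: it returns the key list produced by
# paginated_list(), and raises S3ObjectFailure when the underlying call hits a botocore
# BotoCoreError, so the assertions check the return value directly.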